InlineSpiller.cpp
1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
42 #include "llvm/CodeGen/Spiller.h"
43 #include "llvm/CodeGen/StackMaps.h"
49 #include "llvm/Config/llvm-config.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
57 #include <cassert>
58 #include <iterator>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "regalloc"
66 
67 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
68 STATISTIC(NumSnippets, "Number of spilled snippets");
69 STATISTIC(NumSpills, "Number of spills inserted");
70 STATISTIC(NumSpillsRemoved, "Number of spills removed");
71 STATISTIC(NumReloads, "Number of reloads inserted");
72 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
73 STATISTIC(NumFolded, "Number of folded stack accesses");
74 STATISTIC(NumFoldedLoads, "Number of folded loads");
75 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
76 
77 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
78  cl::desc("Disable inline spill hoisting"));
79 static cl::opt<bool>
80 RestrictStatepointRemat("restrict-statepoint-remat",
81  cl::init(false), cl::Hidden,
82  cl::desc("Restrict remat for statepoint operands"));
83 
84 namespace {
85 
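/// Helper that records spills of the same value to the same stack slot,
/// removes redundant ones, and hoists the remaining spills to colder basic
/// blocks on the dominator tree once spilling is complete.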
86 class HoistSpillHelper : private LiveRangeEdit::Delegate {
87  MachineFunction &MF;
88  LiveIntervals &LIS;
89  LiveStacks &LSS;
90  AliasAnalysis *AA;
91  MachineDominatorTree &MDT;
92  MachineLoopInfo &Loops;
93  VirtRegMap &VRM;
94  MachineRegisterInfo &MRI;
95  const TargetInstrInfo &TII;
96  const TargetRegisterInfo &TRI;
97  const MachineBlockFrequencyInfo &MBFI;
98 
99  InsertPointAnalysis IPA;
100 
101  // Map from StackSlot to the LiveInterval of the original register.
102  // Note the LiveInterval of the original register may have been deleted
103  // after it is spilled. We keep a copy here to track the range where
104  // spills can be moved.
105  std::map<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
106 
107  // Map from pair of (StackSlot and Original VNI) to a set of spills which
108  // have the same stackslot and have equal values defined by Original VNI.
109  // These spills are mergeable and are hoist candidates.
110  using MergeableSpillsMap =
111      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
112  MergeableSpillsMap MergeableSpills;
113 
114  /// This is the map from original register to a set containing all its
115  /// siblings. To hoist a spill to another BB, we need to find out a live
116  /// sibling there and use it as the source of the new spill.
117  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
118 
119  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
120  MachineBasicBlock &BB, Register &LiveReg);
121 
122  void rmRedundantSpills(
126 
127  void getVisitOrders(
133 
134  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
138 
139 public:
140  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
141  VirtRegMap &vrm)
142  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
143  LSS(pass.getAnalysis<LiveStacks>()),
144  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
145  MDT(pass.getAnalysis<MachineDominatorTree>()),
146  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
147  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
148  TRI(*mf.getSubtarget().getRegisterInfo()),
149  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
150  IPA(LIS, mf.getNumBlockIDs()) {}
151 
152  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
153  unsigned Original);
154  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
155  void hoistAllSpills();
156  void LRE_DidCloneVirtReg(Register, Register) override;
157 };
158 
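/// Spiller implementation that inserts spill and reload code directly around
/// the uses of the spilled register, rematerializing values where possible.
/// Spill hoisting across basic blocks is delegated to HoistSpillHelper.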
159 class InlineSpiller : public Spiller {
160  MachineFunction &MF;
161  LiveIntervals &LIS;
162  LiveStacks &LSS;
163  AliasAnalysis *AA;
164  MachineDominatorTree &MDT;
165  MachineLoopInfo &Loops;
166  VirtRegMap &VRM;
167  MachineRegisterInfo &MRI;
168  const TargetInstrInfo &TII;
169  const TargetRegisterInfo &TRI;
170  const MachineBlockFrequencyInfo &MBFI;
171 
172  // Variables that are valid during spill(), but used by multiple methods.
173  LiveRangeEdit *Edit;
174  LiveInterval *StackInt;
175  int StackSlot;
176  Register Original;
177 
178  // All registers to spill to StackSlot, including the main register.
179  SmallVector<Register, 8> RegsToSpill;
180 
181  // All COPY instructions to/from snippets.
182  // They are ignored since both operands refer to the same stack slot.
183  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
184 
185  // Values that failed to remat at some point.
186  SmallPtrSet<VNInfo*, 8> UsedValues;
187 
188  // Dead defs generated during spilling.
189  SmallVector<MachineInstr*, 8> DeadDefs;
190 
191  // Object that records spill information and does the hoisting.
192  HoistSpillHelper HSpiller;
193 
194  // Live range weight calculator.
195  VirtRegAuxInfo &VRAI;
196 
197  ~InlineSpiller() override = default;
198 
199 public:
200  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
201  VirtRegAuxInfo &VRAI)
202  : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
203  LSS(Pass.getAnalysis<LiveStacks>()),
204  AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
205  MDT(Pass.getAnalysis<MachineDominatorTree>()),
206  Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
207  MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
208  TRI(*MF.getSubtarget().getRegisterInfo()),
209  MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
210  HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
211 
212  void spill(LiveRangeEdit &) override;
213  void postOptimization() override;
214 
215 private:
216  bool isSnippet(const LiveInterval &SnipLI);
217  void collectRegsToSpill();
218 
219  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
220 
221  bool isSibling(Register Reg);
222  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
223  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
224 
225  void markValueUsed(LiveInterval*, VNInfo*);
226  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
227  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
228  void reMaterializeAll();
229 
230  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
231  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
232  MachineInstr *LoadMI = nullptr);
233  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
234  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
235 
236  void spillAroundUses(Register Reg);
237  void spillAll();
238 };
239 
240 } // end anonymous namespace
241 
242 Spiller::~Spiller() = default;
243 
244 void Spiller::anchor() {}
245 
246 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
247                                    MachineFunction &MF, VirtRegMap &VRM,
248  VirtRegAuxInfo &VRAI) {
249  return new InlineSpiller(Pass, MF, VRM, VRAI);
250 }
251 
252 //===----------------------------------------------------------------------===//
253 // Snippets
254 //===----------------------------------------------------------------------===//
255 
256 // When spilling a virtual register, we also spill any snippets it is connected
257 // to. The snippets are small live ranges that only have a single real use,
258 // leftovers from live range splitting. Spilling them enables memory operand
259 // folding or tightens the live range around the single use.
260 //
261 // This minimizes register pressure and maximizes the store-to-load distance for
262 // spill slots which can be important in tight loops.
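//
// A typical snippet left over from splitting looks like:
//
//   %snip = COPY %reg
//   ...
//   USE %snip            ; the only real use
//
// Once %reg lives in a stack slot, the COPY becomes a fill and the single use
// can often fold that fill into a memory operand.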
263 
264 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
265 /// otherwise return an invalid Register().
266 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
267  if (!MI.isFullCopy())
268  return Register();
269  if (MI.getOperand(0).getReg() == Reg)
270  return MI.getOperand(1).getReg();
271  if (MI.getOperand(1).getReg() == Reg)
272  return MI.getOperand(0).getReg();
273  return Register();
274 }
275 
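/// Ensure that LiveIntervals has a live interval for every virtual register
/// defined by \p MI. LiveIntervals::getInterval() computes the interval on
/// demand, which is what newly inserted spill instructions rely on.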
276 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
277  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
278  const MachineOperand &MO = MI.getOperand(I);
279  if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
280  LIS.getInterval(MO.getReg());
281  }
282 }
283 
284 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
285 /// It is assumed that SnipLI is a virtual register with the same original as
286 /// Edit->getReg().
287 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
288  Register Reg = Edit->getReg();
289 
290  // A snippet is a tiny live range with only a single instruction using it
291  // besides copies to/from Reg or spills/fills. We accept:
292  //
293  // %snip = COPY %Reg / FILL fi#
294  // %snip = USE %snip
295  // %Reg = COPY %snip / SPILL %snip, fi#
296  //
297  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
298  return false;
299 
300  MachineInstr *UseMI = nullptr;
301 
302  // Check that all uses satisfy our criteria.
303  for (MachineRegisterInfo::reg_instr_nodbg_iterator
304           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
305           E = MRI.reg_instr_nodbg_end();
306       RI != E;) {
307  MachineInstr &MI = *RI++;
308 
309  // Allow copies to/from Reg.
310  if (isFullCopyOf(MI, Reg))
311  continue;
312 
313  // Allow stack slot loads.
314  int FI;
315  if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
316  continue;
317 
318  // Allow stack slot stores.
319  if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
320  continue;
321 
322  // Allow a single additional instruction.
323  if (UseMI && &MI != UseMI)
324  return false;
325  UseMI = &MI;
326  }
327  return true;
328 }
329 
330 /// collectRegsToSpill - Collect live range snippets that only have a single
331 /// real use.
332 void InlineSpiller::collectRegsToSpill() {
333  Register Reg = Edit->getReg();
334 
335  // Main register always spills.
336  RegsToSpill.assign(1, Reg);
337  SnippetCopies.clear();
338 
339  // Snippets all have the same original, so there can't be any for an original
340  // register.
341  if (Original == Reg)
342  return;
343 
344  for (MachineRegisterInfo::reg_instr_iterator
345       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
346  MachineInstr &MI = *RI++;
347  Register SnipReg = isFullCopyOf(MI, Reg);
348  if (!isSibling(SnipReg))
349  continue;
350  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
351  if (!isSnippet(SnipLI))
352  continue;
353  SnippetCopies.insert(&MI);
354  if (isRegToSpill(SnipReg))
355  continue;
356  RegsToSpill.push_back(SnipReg);
357  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
358  ++NumSnippets;
359  }
360 }
361 
362 bool InlineSpiller::isSibling(Register Reg) {
363  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
364 }
365 
366 /// It is beneficial to spill to an earlier place in the same BB if the
367 /// following holds:
368 /// There is an alternative def earlier in the same MBB.
369 /// Hoist the spill as far as possible in SpillMBB. This can ease
370 /// register pressure:
371 ///
372 /// x = def
373 /// y = use x
374 /// s = copy x
375 ///
376 /// Hoisting the spill of s to immediately after the def removes the
377 /// interference between x and y:
378 ///
379 /// x = def
380 /// spill x
381 /// y = use killed x
382 ///
383 /// This hoist only helps when the copy kills its source.
384 ///
385 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
386  MachineInstr &CopyMI) {
387  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
388 #ifndef NDEBUG
389  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
390  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
391 #endif
392 
393  Register SrcReg = CopyMI.getOperand(1).getReg();
394  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
395  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
396  LiveQueryResult SrcQ = SrcLI.Query(Idx);
397  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
398  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
399  return false;
400 
401  // Conservatively extend the stack slot range to the range of the original
402  // value. We may be able to do better with stack slot coloring by being more
403  // careful here.
404  assert(StackInt && "No stack slot assigned yet.");
405  LiveInterval &OrigLI = LIS.getInterval(Original);
406  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
407  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
408  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
409  << *StackInt << '\n');
410 
411  // We are going to spill SrcVNI immediately after its def, so clear out
412  // any later spills of the same value.
413  eliminateRedundantSpills(SrcLI, SrcVNI);
414 
415  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
416  MachineBasicBlock::iterator MII;
417  if (SrcVNI->isPHIDef())
418  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
419  else {
420  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
421  assert(DefMI && "Defining instruction disappeared");
422  MII = DefMI;
423  ++MII;
424  }
425  MachineInstrSpan MIS(MII, MBB);
426  // Insert spill without kill flag immediately after def.
427  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
428  MRI.getRegClass(SrcReg), &TRI);
429  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
430  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
431  getVDefInterval(MI, LIS);
432  --MII; // Point to store instruction.
433  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
434 
435  // If only one store instruction is required for the spill, add it to the
436  // mergeable list. In X86 AMX, two instructions are required to store, so
437  // we disable merging for that case.
438  if (MIS.begin() == MII)
439  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
440  ++NumSpills;
441  return true;
442 }
443 
444 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
445 /// redundant spills of this value in SLI.reg and sibling copies.
446 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
447  assert(VNI && "Missing value");
448  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
449  WorkList.push_back(std::make_pair(&SLI, VNI));
450  assert(StackInt && "No stack slot assigned yet.");
451 
452  do {
453  LiveInterval *LI;
454  std::tie(LI, VNI) = WorkList.pop_back_val();
455  Register Reg = LI->reg();
456  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
457  << VNI->def << " in " << *LI << '\n');
458 
459  // Regs to spill are taken care of.
460  if (isRegToSpill(Reg))
461  continue;
462 
463  // Add all of VNI's live range to StackInt.
464  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
465  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
466 
467  // Find all spills and copies of VNI.
468  for (MachineRegisterInfo::use_instr_nodbg_iterator
469       UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
470       UI != E; ) {
471  MachineInstr &MI = *UI++;
472  if (!MI.isCopy() && !MI.mayStore())
473  continue;
474  SlotIndex Idx = LIS.getInstructionIndex(MI);
475  if (LI->getVNInfoAt(Idx) != VNI)
476  continue;
477 
478  // Follow sibling copies down the dominator tree.
479  if (Register DstReg = isFullCopyOf(MI, Reg)) {
480  if (isSibling(DstReg)) {
481  LiveInterval &DstLI = LIS.getInterval(DstReg);
482  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
483  assert(DstVNI && "Missing defined value");
484  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
485  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
486  }
487  continue;
488  }
489 
490  // Erase spills.
491  int FI;
492  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
493  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
494  // eliminateDeadDefs won't normally remove stores, so switch opcode.
495  MI.setDesc(TII.get(TargetOpcode::KILL));
496  DeadDefs.push_back(&MI);
497  ++NumSpillsRemoved;
498  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
499  --NumSpills;
500  }
501  }
502  } while (!WorkList.empty());
503 }
504 
505 //===----------------------------------------------------------------------===//
506 // Rematerialization
507 //===----------------------------------------------------------------------===//
508 
509 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
510 /// instruction cannot be eliminated. See through snippet copies.
511 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
512  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
513  WorkList.push_back(std::make_pair(LI, VNI));
514  do {
515  std::tie(LI, VNI) = WorkList.pop_back_val();
516  if (!UsedValues.insert(VNI).second)
517  continue;
518 
519  if (VNI->isPHIDef()) {
520  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
521  for (MachineBasicBlock *P : MBB->predecessors()) {
522  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
523  if (PVNI)
524  WorkList.push_back(std::make_pair(LI, PVNI));
525  }
526  continue;
527  }
528 
529  // Follow snippet copies.
530  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
531  if (!SnippetCopies.count(MI))
532  continue;
533  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
534  assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
535  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
536  assert(SnipVNI && "Snippet undefined before copy");
537  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
538  } while (!WorkList.empty());
539 }
540 
541 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
542  MachineInstr &MI) {
543  if (!RestrictStatepointRemat)
544  return true;
545  // Here's a quick explanation of the problem we're trying to handle here:
546  // * There are some pseudo instructions with more vreg uses than there are
547  // physical registers on the machine.
548  // * This is normally handled by spilling the vreg, and folding the reload
549  // into the user instruction. (Thus decreasing the number of used vregs
550  // until the remainder can be assigned to physregs.)
551  // * However, since we may try to spill vregs in any order, we can end up
552  // trying to spill each operand to the instruction, and then rematting it
553  // instead. When that happens, the new live intervals (for the remats) are
554  // expected to be trivially assignable (i.e. RS_Done). However, since we
555  // may have more remats than physregs, we're guaranteed to fail to assign
556  // one.
557  // At the moment, we only handle this for STATEPOINTs since they're the only
558  // pseudo op where we've seen this. If we start seeing other instructions
559  // with the same problem, we need to revisit this.
560  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
561  return true;
562  // For STATEPOINTs we allow rematerialization for fixed arguments only,
563  // hoping that the number of physical registers is enough to cover all
564  // fixed arguments. If that is not true, we need to revisit it.
565  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
566  EndIdx = MI.getNumOperands();
567  Idx < EndIdx; ++Idx) {
568  MachineOperand &MO = MI.getOperand(Idx);
569  if (MO.isReg() && MO.getReg() == VReg)
570  return false;
571  }
572  return true;
573 }
574 
575 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
576 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
577  // Analyze instruction
578  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
579  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
580 
581  if (!RI.Reads)
582  return false;
583 
584  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
585  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
586 
587  if (!ParentVNI) {
588  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
589  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
590  MachineOperand &MO = MI.getOperand(i);
591  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
592  MO.setIsUndef();
593  }
594  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
595  return true;
596  }
597 
598  if (SnippetCopies.count(&MI))
599  return false;
600 
601  LiveInterval &OrigLI = LIS.getInterval(Original);
602  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
603  LiveRangeEdit::Remat RM(ParentVNI);
604  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
605 
606  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
607  markValueUsed(&VirtReg, ParentVNI);
608  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
609  return false;
610  }
611 
612  // If the instruction also writes VirtReg.reg, it had better not require the
613  // same register for uses and defs.
614  if (RI.Tied) {
615  markValueUsed(&VirtReg, ParentVNI);
616  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
617  return false;
618  }
619 
620  // Before rematerializing into a register for a single instruction, try to
621  // fold a load into the instruction. That avoids allocating a new register.
622  if (RM.OrigMI->canFoldAsLoad() &&
623  foldMemoryOperand(Ops, RM.OrigMI)) {
624  Edit->markRematerialized(RM.ParentVNI);
625  ++NumFoldedLoads;
626  return true;
627  }
628 
629  // If we can't guarantee that we'll be able to actually assign the new vreg,
630  // we can't remat.
631  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
632  markValueUsed(&VirtReg, ParentVNI);
633  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
634  return false;
635  }
636 
637  // Allocate a new register for the remat.
638  Register NewVReg = Edit->createFrom(Original);
639 
640  // Finally we can rematerialize OrigMI before MI.
641  SlotIndex DefIdx =
642  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
643 
644  // We take the DebugLoc from MI, since OrigMI may be attributed to a
645  // different source location.
646  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
647  NewMI->setDebugLoc(MI.getDebugLoc());
648 
649  (void)DefIdx;
650  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
651  << *LIS.getInstructionFromIndex(DefIdx));
652 
653  // Replace operands
654  for (const auto &OpPair : Ops) {
655  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
656  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
657  MO.setReg(NewVReg);
658  MO.setIsKill();
659  }
660  }
661  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
662 
663  ++NumRemats;
664  return true;
665 }
666 
667 /// reMaterializeAll - Try to rematerialize as many uses as possible,
668 /// and trim the live ranges after.
669 void InlineSpiller::reMaterializeAll() {
670  if (!Edit->anyRematerializable(AA))
671  return;
672 
673  UsedValues.clear();
674 
675  // Try to remat before all uses of snippets.
676  bool anyRemat = false;
677  for (Register Reg : RegsToSpill) {
678  LiveInterval &LI = LIS.getInterval(Reg);
679  for (MachineRegisterInfo::reg_bundle_iterator
680       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
681       RegI != E; ) {
682  MachineInstr &MI = *RegI++;
683 
684  // Debug values are not allowed to affect codegen.
685  if (MI.isDebugValue())
686  continue;
687 
688  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
689  "instruction that isn't a DBG_VALUE");
690 
691  anyRemat |= reMaterializeFor(LI, MI);
692  }
693  }
694  if (!anyRemat)
695  return;
696 
697  // Remove any values that were completely rematted.
698  for (Register Reg : RegsToSpill) {
699  LiveInterval &LI = LIS.getInterval(Reg);
700  for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
701  I != E; ++I) {
702  VNInfo *VNI = *I;
703  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
704  continue;
705  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
706  MI->addRegisterDead(Reg, &TRI);
707  if (!MI->allDefsAreDead())
708  continue;
709  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
710  DeadDefs.push_back(MI);
711  }
712  }
713 
714  // Eliminate dead code after remat. Note that some snippet copies may be
715  // deleted here.
716  if (DeadDefs.empty())
717  return;
718  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
719  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
720 
721  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
722  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
723  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
724  // removed, PHI VNIs are still left in the LiveInterval.
725  // So to get rid of unused regs, we need to check whether they have non-dbg
726  // references instead of whether they have non-empty intervals.
727  unsigned ResultPos = 0;
728  for (Register Reg : RegsToSpill) {
729  if (MRI.reg_nodbg_empty(Reg)) {
730  Edit->eraseVirtReg(Reg);
731  continue;
732  }
733 
734  assert(LIS.hasInterval(Reg) &&
735  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
736  "Empty and not used live-range?!");
737 
738  RegsToSpill[ResultPos++] = Reg;
739  }
740  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
741  LLVM_DEBUG(dbgs() << RegsToSpill.size()
742  << " registers to spill after remat.\n");
743 }
744 
745 //===----------------------------------------------------------------------===//
746 // Spilling
747 //===----------------------------------------------------------------------===//
748 
749 /// If MI is a load or store of StackSlot, it can be removed.
750 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
751  int FI = 0;
752  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
753  bool IsLoad = InstrReg;
754  if (!IsLoad)
755  InstrReg = TII.isStoreToStackSlot(*MI, FI);
756 
757  // We have a stack access. Is it the right register and slot?
758  if (InstrReg != Reg || FI != StackSlot)
759  return false;
760 
761  if (!IsLoad)
762  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
763 
764  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
765  LIS.RemoveMachineInstrFromMaps(*MI);
766  MI->eraseFromParent();
767 
768  if (IsLoad) {
769  ++NumReloadsRemoved;
770  --NumReloads;
771  } else {
772  ++NumSpillsRemoved;
773  --NumSpills;
774  }
775 
776  return true;
777 }
778 
779 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
781 // Dump the range of instructions from B to E with their slot indexes.
782 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
783                                                MachineBasicBlock::iterator E,
784                                                LiveIntervals const &LIS,
785  const char *const header,
786  Register VReg = Register()) {
787  char NextLine = '\n';
788  char SlotIndent = '\t';
789 
790  if (std::next(B) == E) {
791  NextLine = ' ';
792  SlotIndent = ' ';
793  }
794 
795  dbgs() << '\t' << header << ": " << NextLine;
796 
797  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
798  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
799 
800  // If a register was passed in and this instruction has it as a
801  // destination that is marked as an early clobber, print the
802  // early-clobber slot index.
803  if (VReg) {
804  MachineOperand *MO = I->findRegisterDefOperand(VReg);
805  if (MO && MO->isEarlyClobber())
806  Idx = Idx.getRegSlot(true);
807  }
808 
809  dbgs() << SlotIndent << Idx << '\t' << *I;
810  }
811 }
812 #endif
813 
814 /// foldMemoryOperand - Try folding stack slot references in Ops into their
815 /// instructions.
816 ///
817 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
818 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
819 /// @return True on success.
820 bool InlineSpiller::
821 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
822  MachineInstr *LoadMI) {
823  if (Ops.empty())
824  return false;
825  // Don't attempt folding in bundles.
826  MachineInstr *MI = Ops.front().first;
827  if (Ops.back().first != MI || MI->isBundled())
828  return false;
829 
830  bool WasCopy = MI->isCopy();
831  Register ImpReg;
832 
833  // TII::foldMemoryOperand will do what we need here for statepoint
834  // (fold load into use and remove corresponding def). We will replace
835  // uses of removed def with loads (spillAroundUses).
836  // For that to work we need to untie def and use to pass it through
837  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
838  // fold them.
839  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
840 
841  // Spill subregs if the target allows it.
842  // We always want to spill subregs for stackmap/patchpoint pseudos.
843  bool SpillSubRegs = TII.isSubregFoldable() ||
844  MI->getOpcode() == TargetOpcode::STATEPOINT ||
845  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
846  MI->getOpcode() == TargetOpcode::STACKMAP;
847 
848  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
849  // operands.
850  SmallVector<unsigned, 8> FoldOps;
851  for (const auto &OpPair : Ops) {
852  unsigned Idx = OpPair.second;
853  assert(MI == OpPair.first && "Instruction conflict during operand folding");
854  MachineOperand &MO = MI->getOperand(Idx);
855  if (MO.isImplicit()) {
856  ImpReg = MO.getReg();
857  continue;
858  }
859 
860  if (!SpillSubRegs && MO.getSubReg())
861  return false;
862  // We cannot fold a load instruction into a def.
863  if (LoadMI && MO.isDef())
864  return false;
865  // Tied use operands should not be passed to foldMemoryOperand.
866  if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
867  FoldOps.push_back(Idx);
868  }
869 
870  // If we only have implicit uses, we won't be able to fold that.
871  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
872  if (FoldOps.empty())
873  return false;
874 
875  MachineInstrSpan MIS(MI, MI->getParent());
876 
877  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
878  if (UntieRegs)
879  for (unsigned Idx : FoldOps) {
880  MachineOperand &MO = MI->getOperand(Idx);
881  if (!MO.isTied())
882  continue;
883  unsigned Tied = MI->findTiedOperandIdx(Idx);
884  if (MO.isUse())
885  TiedOps.emplace_back(Tied, Idx);
886  else {
887  assert(MO.isDef() && "Tied to not use and def?");
888  TiedOps.emplace_back(Idx, Tied);
889  }
890  MI->untieRegOperand(Idx);
891  }
892 
893  MachineInstr *FoldMI =
894  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
895  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
896  if (!FoldMI) {
897  // Re-tie operands.
898  for (auto Tied : TiedOps)
899  MI->tieOperands(Tied.first, Tied.second);
900  return false;
901  }
902 
903  // Remove LIS for any dead defs in the original MI not in FoldMI.
904  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
905  if (!MO->isReg())
906  continue;
907  Register Reg = MO->getReg();
908  if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
909  continue;
910  }
911  // Skip non-Defs, including undef uses and internal reads.
912  if (MO->isUse())
913  continue;
914  PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
915  if (RI.FullyDefined)
916  continue;
917  // FoldMI does not define this physreg. Remove the LI segment.
918  assert(MO->isDead() && "Cannot fold physreg def");
919  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
920  LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
921  }
922 
923  int FI;
924  if (TII.isStoreToStackSlot(*MI, FI) &&
925  HSpiller.rmFromMergeableSpills(*MI, FI))
926  --NumSpills;
927  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
928  // Update the call site info.
929  if (MI->isCandidateForCallSiteEntry())
930  MI->getMF()->moveCallSiteInfo(MI, FoldMI);
931  MI->eraseFromParent();
932 
933  // Insert any new instructions other than FoldMI into the LIS maps.
934  assert(!MIS.empty() && "Unexpected empty span of instructions!");
935  for (MachineInstr &MI : MIS)
936  if (&MI != FoldMI)
937  LIS.InsertMachineInstrInMaps(MI);
938 
939  // TII.foldMemoryOperand may have left some implicit operands on the
940  // instruction. Strip them.
941  if (ImpReg)
942  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
943  MachineOperand &MO = FoldMI->getOperand(i - 1);
944  if (!MO.isReg() || !MO.isImplicit())
945  break;
946  if (MO.getReg() == ImpReg)
947  FoldMI->RemoveOperand(i - 1);
948  }
949 
950  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
951  "folded"));
952 
953  if (!WasCopy)
954  ++NumFolded;
955  else if (Ops.front().second == 0) {
956  ++NumSpills;
957  // If only one store instruction is required for the spill, add it to the
958  // mergeable list. In X86 AMX, two instructions are required to store, so
959  // we disable merging for that case.
960  if (std::distance(MIS.begin(), MIS.end()) <= 1)
961  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
962  } else
963  ++NumReloads;
964  return true;
965 }
966 
967 void InlineSpiller::insertReload(Register NewVReg,
968  SlotIndex Idx,
969                                  MachineBasicBlock::iterator MI) {
970  MachineBasicBlock &MBB = *MI->getParent();
971 
972  MachineInstrSpan MIS(MI, &MBB);
973  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
974  MRI.getRegClass(NewVReg), &TRI);
975 
976  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
977 
978  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
979  NewVReg));
980  ++NumReloads;
981 }
982 
983 /// Check if \p Def fully defines a VReg with an undefined value.
984 /// If that's the case, that means the value of VReg is actually
985 /// not relevant.
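/// insertSpill() uses this to emit a KILL marker instead of a real store when
/// the spilled value is completely undef.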
986 static bool isRealSpill(const MachineInstr &Def) {
987  if (!Def.isImplicitDef())
988  return true;
989  assert(Def.getNumOperands() == 1 &&
990  "Implicit def with more than one definition");
991  // We can say that the VReg defined by Def is undef, only if it is
992  // fully defined by Def. Otherwise, some of the lanes may not be
993  // undef and the value of the VReg matters.
994  return Def.getOperand(0).getSubReg();
995 }
996 
997 /// insertSpill - Insert a spill of NewVReg after MI.
998 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
999                                 MachineBasicBlock::iterator MI) {
1000  // Spills are not terminators, so inserting spills after terminators will
1001  // violate invariants in MachineVerifier.
1002  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1003  MachineBasicBlock &MBB = *MI->getParent();
1004 
1005  MachineInstrSpan MIS(MI, &MBB);
1006  MachineBasicBlock::iterator SpillBefore = std::next(MI);
1007  bool IsRealSpill = isRealSpill(*MI);
1008 
1009  if (IsRealSpill)
1010  TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1011  MRI.getRegClass(NewVReg), &TRI);
1012  else
1013  // Don't spill undef value.
1014  // Anything works for undef, in particular keeping the memory
1015  // uninitialized is a viable option and it saves code size and
1016  // run time.
1017  BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1018  .addReg(NewVReg, getKillRegState(isKill));
1019 
1020  MachineBasicBlock::iterator Spill = std::next(MI);
1021  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1022  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1023  getVDefInterval(MI, LIS);
1024 
1025  LLVM_DEBUG(
1026  dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1027  ++NumSpills;
1028  // If only one store instruction is required for the spill, add it to the
1029  // mergeable list. In X86 AMX, two instructions are required to store, so
1030  // we disable merging for that case.
1031  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1032  HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1033 }
1034 
1035 /// spillAroundUses - insert spill code around each use of Reg.
1036 void InlineSpiller::spillAroundUses(Register Reg) {
1037  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1038  LiveInterval &OldLI = LIS.getInterval(Reg);
1039 
1040  // Iterate over instructions using Reg.
1041  for (MachineRegisterInfo::reg_bundle_iterator
1042       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
1043  RegI != E; ) {
1044  MachineInstr *MI = &*(RegI++);
1045 
1046  // Debug values are not allowed to affect codegen.
1047  if (MI->isDebugValue()) {
1048  // Modify DBG_VALUE now that the value is in a spill slot.
1049  MachineBasicBlock *MBB = MI->getParent();
1050  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
1051  buildDbgValueForSpill(*MBB, MI, *MI, StackSlot, Reg);
1052  MBB->erase(MI);
1053  continue;
1054  }
1055 
1056  assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
1057  "instruction that isn't a DBG_VALUE");
1058 
1059  // Ignore copies to/from snippets. We'll delete them.
1060  if (SnippetCopies.count(MI))
1061  continue;
1062 
1063  // Stack slot accesses may coalesce away.
1064  if (coalesceStackAccess(MI, Reg))
1065  continue;
1066 
1067  // Analyze instruction.
1068  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
1069  VirtRegInfo RI = AnalyzeVirtRegInBundle(*MI, Reg, &Ops);
1070 
1071  // Find the slot index where this instruction reads and writes OldLI.
1072  // This is usually the def slot, except for tied early clobbers.
1073  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
1074  if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1075  if (SlotIndex::isSameInstr(Idx, VNI->def))
1076  Idx = VNI->def;
1077 
1078  // Check for a sibling copy.
1079  Register SibReg = isFullCopyOf(*MI, Reg);
1080  if (SibReg && isSibling(SibReg)) {
1081  // This may actually be a copy between snippets.
1082  if (isRegToSpill(SibReg)) {
1083  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
1084  SnippetCopies.insert(MI);
1085  continue;
1086  }
1087  if (RI.Writes) {
1088  if (hoistSpillInsideBB(OldLI, *MI)) {
1089  // This COPY is now dead, the value is already in the stack slot.
1090  MI->getOperand(0).setIsDead();
1091  DeadDefs.push_back(MI);
1092  continue;
1093  }
1094  } else {
1095  // This is a reload for a sib-reg copy. Drop spills downstream.
1096  LiveInterval &SibLI = LIS.getInterval(SibReg);
1097  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1098  // The COPY will fold to a reload below.
1099  }
1100  }
1101 
1102  // Attempt to fold memory ops.
1103  if (foldMemoryOperand(Ops))
1104  continue;
1105 
1106  // Create a new virtual register for spill/fill.
1107  // FIXME: Infer regclass from instruction alone.
1108  Register NewVReg = Edit->createFrom(Reg);
1109 
1110  if (RI.Reads)
1111  insertReload(NewVReg, Idx, MI);
1112 
1113  // Rewrite instruction operands.
1114  bool hasLiveDef = false;
1115  for (const auto &OpPair : Ops) {
1116  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1117  MO.setReg(NewVReg);
1118  if (MO.isUse()) {
1119  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1120  MO.setIsKill();
1121  } else {
1122  if (!MO.isDead())
1123  hasLiveDef = true;
1124  }
1125  }
1126  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
1127 
1128  // FIXME: Use a second vreg if instruction has no tied ops.
1129  if (RI.Writes)
1130  if (hasLiveDef)
1131  insertSpill(NewVReg, true, MI);
1132  }
1133 }
1134 
1135 /// spillAll - Spill all registers remaining after rematerialization.
1136 void InlineSpiller::spillAll() {
1137  // Update LiveStacks now that we are committed to spilling.
1138  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1139  StackSlot = VRM.assignVirt2StackSlot(Original);
1140  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1141  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1142  } else
1143  StackInt = &LSS.getInterval(StackSlot);
1144 
1145  if (Original != Edit->getReg())
1146  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1147 
1148  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1149  for (Register Reg : RegsToSpill)
1150  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1151  StackInt->getValNumInfo(0));
1152  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1153 
1154  // Spill around uses of all RegsToSpill.
1155  for (Register Reg : RegsToSpill)
1156  spillAroundUses(Reg);
1157 
1158  // Hoisted spills may cause dead code.
1159  if (!DeadDefs.empty()) {
1160  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1161  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1162  }
1163 
1164  // Finally delete the SnippetCopies.
1165  for (Register Reg : RegsToSpill) {
1166  for (MachineRegisterInfo::reg_instr_iterator
1167       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
1168       RI != E; ) {
1169  MachineInstr &MI = *(RI++);
1170  assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1171  // FIXME: Do this with a LiveRangeEdit callback.
1172  LIS.RemoveMachineInstrFromMaps(MI);
1173  MI.eraseFromParent();
1174  }
1175  }
1176 
1177  // Delete all spilled registers.
1178  for (Register Reg : RegsToSpill)
1179  Edit->eraseVirtReg(Reg);
1180 }
1181 
1182 void InlineSpiller::spill(LiveRangeEdit &edit) {
1183  ++NumSpilledRanges;
1184  Edit = &edit;
1185  assert(!Register::isStackSlot(edit.getReg()) &&
1186  "Trying to spill a stack slot.");
1187  // Share a stack slot among all descendants of Original.
1188  Original = VRM.getOriginal(edit.getReg());
1189  StackSlot = VRM.getStackSlot(Original);
1190  StackInt = nullptr;
1191 
1192  LLVM_DEBUG(dbgs() << "Inline spilling "
1193                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1194                    << ':' << edit.getParent() << "\nFrom original "
1195  << printReg(Original) << '\n');
1196  assert(edit.getParent().isSpillable() &&
1197  "Attempting to spill already spilled value.");
1198  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1199 
1200  collectRegsToSpill();
1201  reMaterializeAll();
1202 
1203  // Remat may handle everything.
1204  if (!RegsToSpill.empty())
1205  spillAll();
1206 
1207  Edit->calculateRegClassAndHint(MF, VRAI);
1208 }
1209 
1210 /// Optimizations after all the reg selections and spills are done.
1211 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1212 
1213 /// When a spill is inserted, add the spill to MergeableSpills map.
1214 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1215  unsigned Original) {
1216  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1217  LiveInterval &OrigLI = LIS.getInterval(Original);
1218  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
1219  // LiveInterval may be cleared after all its references are spilled.
1220  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1221  auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1222  LI->assign(OrigLI, Allocator);
1223  StackSlotToOrigLI[StackSlot] = std::move(LI);
1224  }
1225  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1226  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1227  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1228  MergeableSpills[MIdx].insert(&Spill);
1229 }
1230 
1231 /// When a spill is removed, remove the spill from MergeableSpills map.
1232 /// Return true if the spill is removed successfully.
1233 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1234  int StackSlot) {
1235  auto It = StackSlotToOrigLI.find(StackSlot);
1236  if (It == StackSlotToOrigLI.end())
1237  return false;
1238  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1239  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1240  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1241  return MergeableSpills[MIdx].erase(&Spill);
1242 }
1243 
1244 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1245 /// i.e., there should be a live sibling of OrigReg at the insert point.
1246 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1247  MachineBasicBlock &BB, Register &LiveReg) {
1248  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1249  // The original def could be after the last insert point in the root block,
1250  // in which case we can't hoist the spill to this block.
1251  if (Idx < OrigVNI.def) {
1252  // TODO: We could be better here. If LI is not alive in landing pad
1253  // we could hoist spill after LIP.
1254  LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1255  return false;
1256  }
1257  Register OrigReg = OrigLI.reg();
1258  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1259  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1260 
1261  for (const Register &SibReg : Siblings) {
1262  LiveInterval &LI = LIS.getInterval(SibReg);
1263  VNInfo *VNI = LI.getVNInfoAt(Idx);
1264  if (VNI) {
1265  LiveReg = SibReg;
1266  return true;
1267  }
1268  }
1269  return false;
1270 }
1271 
1272 /// Remove redundant spills in the same BB. Save those redundant spills in
1273 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1274 void HoistSpillHelper::rmRedundantSpills(
1276  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1278  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1279  // another spill inside. If a BB contains more than one spill, only keep the
1280  // earlier spill with smaller SlotIndex.
1281  for (const auto CurrentSpill : Spills) {
1282  MachineBasicBlock *Block = CurrentSpill->getParent();
1283  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1284  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1285  if (PrevSpill) {
1286  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1287  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1288  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1289  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1290  SpillsToRm.push_back(SpillToRm);
1291  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1292  } else {
1293  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1294  }
1295  }
1296  for (const auto SpillToRm : SpillsToRm)
1297  Spills.erase(SpillToRm);
1298 }
1299 
1300 /// Starting from \p Root find a top-down traversal order of the dominator
1301 /// tree to visit all basic blocks containing the elements of \p Spills.
1302 /// Redundant spills will be found and put into \p SpillsToRm at the same
1303 /// time. \p SpillBBToSpill will be populated as part of the process and
1304 /// maps a basic block to the first store occurring in the basic block.
1305 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1306 void HoistSpillHelper::getVisitOrders(
1309  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1312  // The set contains all the possible BB nodes to which we may hoist
1313  // original spills.
1315  // Save the BB nodes on the path from the first BB node containing
1316  // non-redundant spill to the Root node.
1318  // All the spills to be hoisted must originate from a single def instruction
1319  // to the OrigReg. It means the def instruction should dominate all the spills
1320  // to be hoisted. We choose the BB where the def instruction is located as
1321  // the Root.
1322  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1323  // For every node on the dominator tree that contains a spill, walk up the
1324  // dominator tree towards the Root node until it is reached. If there is
1325  // another node containing a spill in the middle of the path, the spill seen
1326  // earlier is redundant and the node containing it will be removed. All the nodes on
1327  // the path starting from the first node with non-redundant spill to the Root
1328  // node will be added to the WorkSet, which will contain all the possible
1329  // locations where spills may be hoisted to after the loop below is done.
1330  for (const auto Spill : Spills) {
1331  MachineBasicBlock *Block = Spill->getParent();
1332  MachineDomTreeNode *Node = MDT[Block];
1333  MachineInstr *SpillToRm = nullptr;
1334  while (Node != RootIDomNode) {
1335  // If Node dominates Block, and it already contains a spill, the spill in
1336  // Block will be redundant.
1337  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1338  SpillToRm = SpillBBToSpill[MDT[Block]];
1339  break;
1340  /// If we see the Node already in WorkSet, the path from the Node to
1341  /// the Root node must have already been traversed by another spill.
1342  /// Then there is no need to repeat.
1343  } else if (WorkSet.count(Node)) {
1344  break;
1345  } else {
1346  NodesOnPath.insert(Node);
1347  }
1348  Node = Node->getIDom();
1349  }
1350  if (SpillToRm) {
1351  SpillsToRm.push_back(SpillToRm);
1352  } else {
1353  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1354  // set the initial status before hoisting starts. The value of BBs
1355  // containing original spills is set to 0, in order to distinguish them
1356  // from BBs containing hoisted spills, which will be inserted into
1357  // SpillsToKeep later during hoisting.
1358  SpillsToKeep[MDT[Block]] = 0;
1359  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1360  }
1361  NodesOnPath.clear();
1362  }
1363 
1364  // Sort the nodes in WorkSet in top-down order and save the nodes
1365  // in Orders. Orders will be used for hoisting in runHoistSpills.
1366  unsigned idx = 0;
1367  Orders.push_back(MDT.getBase().getNode(Root));
1368  do {
1369  MachineDomTreeNode *Node = Orders[idx++];
1370  for (MachineDomTreeNode *Child : Node->children()) {
1371  if (WorkSet.count(Child))
1372  Orders.push_back(Child);
1373  }
1374  } while (idx != Orders.size());
1375  assert(Orders.size() == WorkSet.size() &&
1376  "Orders have different size with WorkSet");
1377 
1378 #ifndef NDEBUG
1379  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1380  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1381  for (; RIt != Orders.rend(); RIt++)
1382  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1383  LLVM_DEBUG(dbgs() << "\n");
1384 #endif
1385 }
1386 
1387 /// Try to hoist spills according to BB hotness. The spills to be removed will
1388 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1389 /// \p SpillsToIns.
1390 void HoistSpillHelper::runHoistSpills(
1391  LiveInterval &OrigLI, VNInfo &OrigVNI,
1393  SmallVectorImpl<MachineInstr *> &SpillsToRm,
1395  // Visit order of dominator tree nodes.
1397  // SpillsToKeep contains all the nodes where spills are to be inserted
1398  // during hoisting. If the spill to be inserted is an original spill
1399  // (not a hoisted one), the value of the map entry is 0. If the spill
1400  // is a hoisted spill, the value of the map entry is the VReg to be used
1401  // as the source of the spill.
1403  // Map from BB to the first spill inside of it.
1405 
1406  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1407 
1408  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1409  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1410  SpillBBToSpill);
1411 
1412  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1413  // nodes set and the cost of all the spills inside those nodes.
1414  // The nodes set are the locations where spills are to be inserted
1415  // in the subtree of current node.
1416  using NodesCostPair =
1417  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1418  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1419 
1420  // Iterate Orders set in reverse order, which will be a bottom-up order
1421  // in the dominator tree. Once we visit a dom tree node, we know its
1422  // children have already been visited and the spill locations in the
1423  // subtrees of all the children have been determined.
1424  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1425  for (; RIt != Orders.rend(); RIt++) {
1426  MachineBasicBlock *Block = (*RIt)->getBlock();
1427 
1428  // If Block contains an original spill, simply continue.
1429  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1430  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1431  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1432  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1433  continue;
1434  }
1435 
1436  // Collect spills in subtree of current node (*RIt) to
1437  // SpillsInSubTreeMap[*RIt].first.
1438  for (MachineDomTreeNode *Child : (*RIt)->children()) {
1439  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1440  continue;
1441  // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1442  // should be placed before getting the begin and end iterators of
1443  // SpillsInSubTreeMap[Child].first, or else the iterators may be
1444  // invalidated when SpillsInSubTreeMap[*RIt] is accessed for the first time
1445  // and the map grows, moving the original buckets.
1446  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1447  SpillsInSubTreeMap[*RIt].first;
1448  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1449  SubTreeCost += SpillsInSubTreeMap[Child].second;
1450  auto BI = SpillsInSubTreeMap[Child].first.begin();
1451  auto EI = SpillsInSubTreeMap[Child].first.end();
1452  SpillsInSubTree.insert(BI, EI);
1453  SpillsInSubTreeMap.erase(Child);
1454  }
1455 
1456  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1457  SpillsInSubTreeMap[*RIt].first;
1458  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1459  // No spills in subtree, simply continue.
1460  if (SpillsInSubTree.empty())
1461  continue;
1462 
1463  // Check whether Block is a possible candidate to insert spill.
1464  Register LiveReg;
1465  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1466  continue;
1467 
1468  // If there are multiple spills that could be merged, bias a little
1469  // to hoist the spill.
1470  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1471  ? BranchProbability(9, 10)
1472  : BranchProbability(1, 1);
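      // Hoist only if the combined frequency of the spill blocks in the
      // subtree exceeds the frequency of the current block (scaled by
      // MarginProb), i.e. the single hoisted spill is expected to execute
      // less often than the spills it would replace.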
1473  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1474  // Hoist: Move spills to current Block.
1475  for (const auto SpillBB : SpillsInSubTree) {
1476  // When SpillBB is a BB that contains an original spill, add that spill
1477  // to SpillsToRm.
1478  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1479  !SpillsToKeep[SpillBB]) {
1480  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1481  SpillsToRm.push_back(SpillToRm);
1482  }
1483  // SpillBB will not contain a spill anymore, remove it from SpillsToKeep.
1484  SpillsToKeep.erase(SpillBB);
1485  }
1486  // Current Block is the BB containing the new hoisted spill. Add it to
1487  // SpillsToKeep. LiveReg is the source of the new spill.
1488  SpillsToKeep[*RIt] = LiveReg;
1489  LLVM_DEBUG({
1490  dbgs() << "spills in BB: ";
1491  for (const auto Rspill : SpillsInSubTree)
1492  dbgs() << Rspill->getBlock()->getNumber() << " ";
1493  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1494  << "\n";
1495  });
1496  SpillsInSubTree.clear();
1497  SpillsInSubTree.insert(*RIt);
1498  SubTreeCost = MBFI.getBlockFreq(Block);
1499  }
1500  }
1501  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1502  // save them to SpillsToIns.
1503  for (const auto &Ent : SpillsToKeep) {
1504  if (Ent.second)
1505  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1506  }
1507 }
1508 
1509 /// For spills with equal values, remove redundant spills and hoist those left
1510 /// to less hot spots.
1511 ///
1512 /// Spills with equal values will be collected into the same set in
1513 /// MergeableSpills when a spill is inserted. These equal spills originate
1514 /// from the same defining instruction and are dominated by the instruction.
1515 /// Before hoisting all the equal spills, redundant spills inside in the same
1516 /// BB are first marked to be deleted. Then starting from the spills left, walk
1517 /// up on the dominator tree towards the Root node where the define instruction
1518 /// is located, mark the dominated spills to be deleted along the way and
1519 /// collect the BB nodes on the path from non-dominated spills to the define
1520 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1521 /// where we consider hoisting the spills. We iterate the WorkSet in
1522 /// bottom-up order, and for each node, we will decide whether to hoist spills
1523 /// inside its subtree to that node. In this way, we can get benefit locally
1524 /// even if hoisting all the equal spills to one cold place is impossible.
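///
/// For example, if the same value is spilled in two blocks B2 and B3 that are
/// both dominated by B1, and B1 is colder than B2 and B3 combined, the two
/// spills are removed and a single spill is inserted in B1 instead.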
1525 void HoistSpillHelper::hoistAllSpills() {
1526  SmallVector<Register, 4> NewVRegs;
1527  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1528 
1529  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1530  Register Reg = Register::index2VirtReg(i);
1531  Register Original = VRM.getPreSplitReg(Reg);
1532  if (!MRI.def_empty(Reg))
1533  Virt2SiblingsMap[Original].insert(Reg);
1534  }
1535 
1536  // Each entry in MergeableSpills contains a spill set with equal values.
1537  for (auto &Ent : MergeableSpills) {
1538  int Slot = Ent.first.first;
1539  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1540  VNInfo *OrigVNI = Ent.first.second;
1541  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1542  if (Ent.second.empty())
1543  continue;
1544 
1545  LLVM_DEBUG({
1546  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1547  << "Equal spills in BB: ";
1548  for (const auto spill : EqValSpills)
1549  dbgs() << spill->getParent()->getNumber() << " ";
1550  dbgs() << "\n";
1551  });
1552 
1553  // SpillsToRm is the spill set to be removed from EqValSpills.
1554  SmallVector<MachineInstr *, 16> SpillsToRm;
1555  // SpillsToIns is the spill set to be newly inserted after hoisting.
1556  DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1557 
1558  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1559 
1560  LLVM_DEBUG({
1561  dbgs() << "Finally inserted spills in BB: ";
1562  for (const auto &Ispill : SpillsToIns)
1563  dbgs() << Ispill.first->getNumber() << " ";
1564  dbgs() << "\nFinally removed spills in BB: ";
1565  for (const auto Rspill : SpillsToRm)
1566  dbgs() << Rspill->getParent()->getNumber() << " ";
1567  dbgs() << "\n";
1568  });
1569 
1570  // Stack live range update.
1571  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1572  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1573  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1574  StackIntvl.getValNumInfo(0));
1575 
1576  // Insert hoisted spills.
1577  for (auto const &Insert : SpillsToIns) {
1578  MachineBasicBlock *BB = Insert.first;
1579  Register LiveReg = Insert.second;
1580  MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1581  MachineInstrSpan MIS(MII, BB);
1582  TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1583  MRI.getRegClass(LiveReg), &TRI);
1584  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1585  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1586  getVDefInterval(MI, LIS);
1587  ++NumSpills;
1588  }
1589 
1590  // Remove redundant spills or change them to dead instructions.
1591  NumSpills -= SpillsToRm.size();
1592  for (auto const RMEnt : SpillsToRm) {
1593  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1594  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1595  MachineOperand &MO = RMEnt->getOperand(i - 1);
1596  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1597  RMEnt->RemoveOperand(i - 1);
1598  }
1599  }
1600  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1601  }
1602 }
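
hoistAllSpills walks MergeableSpills, whose entries group spills that store the same value: one bucket per (stack slot, original value number) pair. The sketch below mimics that grouping with standard containers and made-up block numbers; the stand-in types are assumptions for illustration and do not correspond to the real MachineInstr or VNInfo classes.

// Stand-alone illustration of grouping "equal" spills by (stack slot, value
// number); the types and numbers here are invented for the example.
#include <cstdio>
#include <map>
#include <set>
#include <utility>

struct FakeSpill { int Block; }; // stand-in for a MachineInstr* spill

using SpillKey = std::pair<int /*StackSlot*/, unsigned /*ValNo id*/>;
using MergeableSpillsMap = std::map<SpillKey, std::set<const FakeSpill *>>;

int main() {
  FakeSpill A{3}, B{5}, C{7};
  MergeableSpillsMap MergeableSpills;

  // Three spills of the same value into the same slot end up in one bucket
  // and become candidates for redundancy removal and hoisting.
  MergeableSpills[{/*Slot=*/1, /*ValNo=*/0}] = {&A, &B, &C};

  for (const auto &Ent : MergeableSpills) {
    if (Ent.second.empty())
      continue;
    std::printf("Slot%d VN%u: equal spills in BB:", Ent.first.first,
                Ent.first.second);
    for (const FakeSpill *S : Ent.second)
      std::printf(" %d", S->Block);
    std::printf("\n");
  }
  return 0;
}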
1603 
1604 /// When cloning a virtual register, the \p New register should have the same
1605 /// physreg or stack slot as the \p Old register.
1606 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1607  if (VRM.hasPhys(Old))
1608  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1609  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1610  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1611  else
1612  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1613  if (VRM.hasShape(Old))
1614  VRM.assignVirt2Shape(New, VRM.getShape(Old));
1615 }
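
The comment before hoistAllSpills describes walking from each remaining spill up the dominator tree to the block of the defining instruction, collecting the visited blocks into a WorkSet of hoist candidates. Below is a minimal sketch of that walk under a toy immediate-dominator table with made-up block numbers; none of it is LLVM API, and the real pass additionally marks dominated spills for deletion along the way.

// Toy WorkSet collection: IDom[b] is the immediate dominator of block b,
// with the root (the block holding the def) dominating itself.
#include <cstdio>
#include <set>
#include <vector>

int main() {
  const std::vector<int> IDom = {0, 0, 1, 1, 2, 2}; // root is block 0
  const int Root = 0;                               // block holding the def
  const std::vector<int> SpillBlocks = {3, 4, 5};   // blocks with equal spills

  std::set<int> WorkSet; // candidate blocks to hoist into
  for (int BB : SpillBlocks) {
    // Record every block on the path from the spill block up to the root;
    // these are the places where hoisting the spill could be profitable.
    for (int Cur = BB; Cur != Root; Cur = IDom[Cur])
      WorkSet.insert(Cur);
  }
  WorkSet.insert(Root);

  std::printf("candidate hoist blocks:");
  for (int BB : WorkSet)
    std::printf(" bb.%d", BB);
  std::printf("\n");
  return 0;
}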