LLVM  10.0.0svn
InlineSpiller.cpp
Go to the documentation of this file.
1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "LiveRangeCalc.h"
15 #include "Spiller.h"
16 #include "SplitKit.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
48 #include "llvm/Config/llvm-config.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
56 #include <cassert>
57 #include <iterator>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 #define DEBUG_TYPE "regalloc"
65 
66 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67 STATISTIC(NumSnippets, "Number of spilled snippets");
68 STATISTIC(NumSpills, "Number of spills inserted");
69 STATISTIC(NumSpillsRemoved, "Number of spills removed");
70 STATISTIC(NumReloads, "Number of reloads inserted");
71 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72 STATISTIC(NumFolded, "Number of folded stack accesses");
73 STATISTIC(NumFoldedLoads, "Number of folded loads");
74 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75 
76 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77  cl::desc("Disable inline spill hoisting"));
78 static cl::opt<bool>
79 RestrictStatepointRemat("restrict-statepoint-remat",
80  cl::init(false), cl::Hidden,
81  cl::desc("Restrict remat for statepoint operands"));
82 
83 namespace {
84 
// Helper used by InlineSpiller: records every spill it inserts, grouped by
// (stack slot, defining value), so that hoistAllSpills() can later merge
// equal-valued spills and hoist them to a better-dominating location.
class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  // NOTE(review): the member declarations initialized as MDT, Loops, MRI and
  // IPA in the constructor below appear to have been lost from this copy of
  // the file -- confirm against the upstream source.
  VirtRegMap &VRM;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  // NOTE(review): the map declaration itself is missing from this copy.

  // Map from pair of (StackSlot and Original VNI) to a set of spills which
  // have the same stackslot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  // NOTE(review): the right-hand side of this alias is missing from this
  // copy of the file.
  using MergeableSpillsMap =
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find out a live
  /// sibling there and use it as the source of the new spill.
  // NOTE(review): the map declaration itself is missing from this copy.

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, unsigned &LiveReg);

  // NOTE(review): the trailing parameter lines of the next three
  // declarations are missing from this copy of the file.
  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  // Record a spill instruction as a merge/hoist candidate.
  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  // Forget a previously recorded spill; presumably returns true when the
  // spill had been recorded (callers use the result to adjust statistics).
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
};
157 
// Spiller implementation that modifies the machine function directly instead
// of recording spills/reloads in VirtRegMap (see file header comment).
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  // NOTE(review): the member declarations initialized as MDT, Loops and MRI
  // in the constructor below appear to have been lost from this copy of the
  // file -- confirm against the upstream source.
  VirtRegMap &VRM;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;     // The live range edit currently being spilled.
  LiveInterval *StackInt;  // Live interval covering the spill slot contents.
  int StackSlot;           // Frame index of the assigned spill slot.
  unsigned Original;       // Original register (per VRM) of Edit->getReg().

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  // NOTE(review): the container declaration (DeadDefs, used by the methods
  // below) is missing from this copy of the file.

  // Object records spills information and does the hoisting.
  HoistSpillHelper HSpiller;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(pass, mf, vrm) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  // True if Reg is already scheduled to be spilled to StackSlot.
  bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(unsigned Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
234 
235 } // end anonymous namespace
236 
Spiller::~Spiller() = default;

// Out-of-line virtual method to anchor Spiller's vtable to this file.
void Spiller::anchor() {}
240 
242  MachineFunction &mf,
243  VirtRegMap &vrm) {
244  return new InlineSpiller(pass, mf, vrm);
245 }
246 
247 //===----------------------------------------------------------------------===//
248 // Snippets
249 //===----------------------------------------------------------------------===//
250 
251 // When spilling a virtual register, we also spill any snippets it is connected
252 // to. The snippets are small live ranges that only have a single real use,
253 // leftovers from live range splitting. Spilling them enables memory operand
254 // folding or tightens the live range around the single use.
255 //
256 // This minimizes register pressure and maximizes the store-to-load distance for
257 // spill slots which can be important in tight loops.
258 
259 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
260 /// otherwise return 0.
261 static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
262  if (!MI.isFullCopy())
263  return 0;
264  if (MI.getOperand(0).getReg() == Reg)
265  return MI.getOperand(1).getReg();
266  if (MI.getOperand(1).getReg() == Reg)
267  return MI.getOperand(0).getReg();
268  return 0;
269 }
270 
271 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
272 /// It is assumed that SnipLI is a virtual register with the same original as
273 /// Edit->getReg().
274 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
275  unsigned Reg = Edit->getReg();
276 
277  // A snippet is a tiny live range with only a single instruction using it
278  // besides copies to/from Reg or spills/fills. We accept:
279  //
280  // %snip = COPY %Reg / FILL fi#
281  // %snip = USE %snip
282  // %Reg = COPY %snip / SPILL %snip, fi#
283  //
284  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
285  return false;
286 
287  MachineInstr *UseMI = nullptr;
288 
289  // Check that all uses satisfy our criteria.
291  RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
292  E = MRI.reg_instr_nodbg_end(); RI != E; ) {
293  MachineInstr &MI = *RI++;
294 
295  // Allow copies to/from Reg.
296  if (isFullCopyOf(MI, Reg))
297  continue;
298 
299  // Allow stack slot loads.
300  int FI;
301  if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
302  continue;
303 
304  // Allow stack slot stores.
305  if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
306  continue;
307 
308  // Allow a single additional instruction.
309  if (UseMI && &MI != UseMI)
310  return false;
311  UseMI = &MI;
312  }
313  return true;
314 }
315 
316 /// collectRegsToSpill - Collect live range snippets that only have a single
317 /// real use.
318 void InlineSpiller::collectRegsToSpill() {
319  unsigned Reg = Edit->getReg();
320 
321  // Main register always spills.
322  RegsToSpill.assign(1, Reg);
323  SnippetCopies.clear();
324 
325  // Snippets all have the same original, so there can't be any for an original
326  // register.
327  if (Original == Reg)
328  return;
329 
331  RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
332  MachineInstr &MI = *RI++;
333  unsigned SnipReg = isFullCopyOf(MI, Reg);
334  if (!isSibling(SnipReg))
335  continue;
336  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
337  if (!isSnippet(SnipLI))
338  continue;
339  SnippetCopies.insert(&MI);
340  if (isRegToSpill(SnipReg))
341  continue;
342  RegsToSpill.push_back(SnipReg);
343  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
344  ++NumSnippets;
345  }
346 }
347 
348 bool InlineSpiller::isSibling(unsigned Reg) {
350  VRM.getOriginal(Reg) == Original;
351 }
352 
353 /// It is beneficial to spill to earlier place in the same BB in case
354 /// as follows:
355 /// There is an alternative def earlier in the same MBB.
356 /// Hoist the spill as far as possible in SpillMBB. This can ease
357 /// register pressure:
358 ///
359 /// x = def
360 /// y = use x
361 /// s = copy x
362 ///
363 /// Hoisting the spill of s to immediately after the def removes the
364 /// interference between x and y:
365 ///
366 /// x = def
367 /// spill x
368 /// y = use killed x
369 ///
370 /// This hoist only helps when the copy kills its source.
371 ///
372 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
373  MachineInstr &CopyMI) {
374  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
375 #ifndef NDEBUG
376  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
377  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
378 #endif
379 
380  unsigned SrcReg = CopyMI.getOperand(1).getReg();
381  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
382  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
383  LiveQueryResult SrcQ = SrcLI.Query(Idx);
384  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
385  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
386  return false;
387 
388  // Conservatively extend the stack slot range to the range of the original
389  // value. We may be able to do better with stack slot coloring by being more
390  // careful here.
391  assert(StackInt && "No stack slot assigned yet.");
392  LiveInterval &OrigLI = LIS.getInterval(Original);
393  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
394  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
395  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
396  << *StackInt << '\n');
397 
398  // We are going to spill SrcVNI immediately after its def, so clear out
399  // any later spills of the same value.
400  eliminateRedundantSpills(SrcLI, SrcVNI);
401 
402  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
404  if (SrcVNI->isPHIDef())
405  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
406  else {
407  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
408  assert(DefMI && "Defining instruction disappeared");
409  MII = DefMI;
410  ++MII;
411  }
412  // Insert spill without kill flag immediately after def.
413  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
414  MRI.getRegClass(SrcReg), &TRI);
415  --MII; // Point to store instruction.
416  LIS.InsertMachineInstrInMaps(*MII);
417  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
418 
419  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
420  ++NumSpills;
421  return true;
422 }
423 
424 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
425 /// redundant spills of this value in SLI.reg and sibling copies.
426 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
427  assert(VNI && "Missing value");
429  WorkList.push_back(std::make_pair(&SLI, VNI));
430  assert(StackInt && "No stack slot assigned yet.");
431 
432  do {
433  LiveInterval *LI;
434  std::tie(LI, VNI) = WorkList.pop_back_val();
435  unsigned Reg = LI->reg;
436  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
437  << VNI->def << " in " << *LI << '\n');
438 
439  // Regs to spill are taken care of.
440  if (isRegToSpill(Reg))
441  continue;
442 
443  // Add all of VNI's live range to StackInt.
444  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
445  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
446 
447  // Find all spills and copies of VNI.
449  UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
450  UI != E; ) {
451  MachineInstr &MI = *UI++;
452  if (!MI.isCopy() && !MI.mayStore())
453  continue;
454  SlotIndex Idx = LIS.getInstructionIndex(MI);
455  if (LI->getVNInfoAt(Idx) != VNI)
456  continue;
457 
458  // Follow sibling copies down the dominator tree.
459  if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
460  if (isSibling(DstReg)) {
461  LiveInterval &DstLI = LIS.getInterval(DstReg);
462  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
463  assert(DstVNI && "Missing defined value");
464  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
465  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
466  }
467  continue;
468  }
469 
470  // Erase spills.
471  int FI;
472  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
473  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
474  // eliminateDeadDefs won't normally remove stores, so switch opcode.
475  MI.setDesc(TII.get(TargetOpcode::KILL));
476  DeadDefs.push_back(&MI);
477  ++NumSpillsRemoved;
478  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
479  --NumSpills;
480  }
481  }
482  } while (!WorkList.empty());
483 }
484 
485 //===----------------------------------------------------------------------===//
486 // Rematerialization
487 //===----------------------------------------------------------------------===//
488 
489 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
490 /// instruction cannot be eliminated. See through snippet copies
491 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
493  WorkList.push_back(std::make_pair(LI, VNI));
494  do {
495  std::tie(LI, VNI) = WorkList.pop_back_val();
496  if (!UsedValues.insert(VNI).second)
497  continue;
498 
499  if (VNI->isPHIDef()) {
500  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
501  for (MachineBasicBlock *P : MBB->predecessors()) {
502  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
503  if (PVNI)
504  WorkList.push_back(std::make_pair(LI, PVNI));
505  }
506  continue;
507  }
508 
509  // Follow snippet copies.
510  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
511  if (!SnippetCopies.count(MI))
512  continue;
513  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
514  assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
515  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
516  assert(SnipVNI && "Snippet undefined before copy");
517  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
518  } while (!WorkList.empty());
519 }
520 
521 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
522  MachineInstr &MI) {
524  return true;
525  // Here's a quick explanation of the problem we're trying to handle here:
526  // * There are some pseudo instructions with more vreg uses than there are
527  // physical registers on the machine.
528  // * This is normally handled by spilling the vreg, and folding the reload
529  // into the user instruction. (Thus decreasing the number of used vregs
530  // until the remainder can be assigned to physregs.)
531  // * However, since we may try to spill vregs in any order, we can end up
532  // trying to spill each operand to the instruction, and then rematting it
533  // instead. When that happens, the new live intervals (for the remats) are
534  // expected to be trivially assignable (i.e. RS_Done). However, since we
535  // may have more remats than physregs, we're guaranteed to fail to assign
536  // one.
537  // At the moment, we only handle this for STATEPOINTs since they're the only
538  // psuedo op where we've seen this. If we start seeing other instructions
539  // with the same problem, we need to revisit this.
540  return (MI.getOpcode() != TargetOpcode::STATEPOINT);
541 }
542 
543 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
544 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
545  // Analyze instruction
547  MIBundleOperands::VirtRegInfo RI =
548  MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
549 
550  if (!RI.Reads)
551  return false;
552 
553  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
554  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
555 
556  if (!ParentVNI) {
557  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
558  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
559  MachineOperand &MO = MI.getOperand(i);
560  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
561  MO.setIsUndef();
562  }
563  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
564  return true;
565  }
566 
567  if (SnippetCopies.count(&MI))
568  return false;
569 
570  LiveInterval &OrigLI = LIS.getInterval(Original);
571  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
572  LiveRangeEdit::Remat RM(ParentVNI);
573  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
574 
575  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
576  markValueUsed(&VirtReg, ParentVNI);
577  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
578  return false;
579  }
580 
581  // If the instruction also writes VirtReg.reg, it had better not require the
582  // same register for uses and defs.
583  if (RI.Tied) {
584  markValueUsed(&VirtReg, ParentVNI);
585  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
586  return false;
587  }
588 
589  // Before rematerializing into a register for a single instruction, try to
590  // fold a load into the instruction. That avoids allocating a new register.
591  if (RM.OrigMI->canFoldAsLoad() &&
592  foldMemoryOperand(Ops, RM.OrigMI)) {
593  Edit->markRematerialized(RM.ParentVNI);
594  ++NumFoldedLoads;
595  return true;
596  }
597 
598  // If we can't guarantee that we'll be able to actually assign the new vreg,
599  // we can't remat.
600  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg, MI)) {
601  markValueUsed(&VirtReg, ParentVNI);
602  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
603  return false;
604  }
605 
606  // Allocate a new register for the remat.
607  unsigned NewVReg = Edit->createFrom(Original);
608 
609  // Finally we can rematerialize OrigMI before MI.
610  SlotIndex DefIdx =
611  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
612 
613  // We take the DebugLoc from MI, since OrigMI may be attributed to a
614  // different source location.
615  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
616  NewMI->setDebugLoc(MI.getDebugLoc());
617 
618  (void)DefIdx;
619  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
620  << *LIS.getInstructionFromIndex(DefIdx));
621 
622  // Replace operands
623  for (const auto &OpPair : Ops) {
624  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
625  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
626  MO.setReg(NewVReg);
627  MO.setIsKill();
628  }
629  }
630  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
631 
632  ++NumRemats;
633  return true;
634 }
635 
636 /// reMaterializeAll - Try to rematerialize as many uses as possible,
637 /// and trim the live ranges after.
638 void InlineSpiller::reMaterializeAll() {
639  if (!Edit->anyRematerializable(AA))
640  return;
641 
642  UsedValues.clear();
643 
644  // Try to remat before all uses of snippets.
645  bool anyRemat = false;
646  for (unsigned Reg : RegsToSpill) {
647  LiveInterval &LI = LIS.getInterval(Reg);
649  RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
650  RegI != E; ) {
651  MachineInstr &MI = *RegI++;
652 
653  // Debug values are not allowed to affect codegen.
654  if (MI.isDebugValue())
655  continue;
656 
657  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
658  "instruction that isn't a DBG_VALUE");
659 
660  anyRemat |= reMaterializeFor(LI, MI);
661  }
662  }
663  if (!anyRemat)
664  return;
665 
666  // Remove any values that were completely rematted.
667  for (unsigned Reg : RegsToSpill) {
668  LiveInterval &LI = LIS.getInterval(Reg);
669  for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
670  I != E; ++I) {
671  VNInfo *VNI = *I;
672  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
673  continue;
674  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
675  MI->addRegisterDead(Reg, &TRI);
676  if (!MI->allDefsAreDead())
677  continue;
678  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
679  DeadDefs.push_back(MI);
680  }
681  }
682 
683  // Eliminate dead code after remat. Note that some snippet copies may be
684  // deleted here.
685  if (DeadDefs.empty())
686  return;
687  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
688  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
689 
690  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
691  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
692  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
693  // removed, PHI VNI are still left in the LiveInterval.
694  // So to get rid of unused reg, we need to check whether it has non-dbg
695  // reference instead of whether it has non-empty interval.
696  unsigned ResultPos = 0;
697  for (unsigned Reg : RegsToSpill) {
698  if (MRI.reg_nodbg_empty(Reg)) {
699  Edit->eraseVirtReg(Reg);
700  continue;
701  }
702 
703  assert(LIS.hasInterval(Reg) &&
704  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
705  "Empty and not used live-range?!");
706 
707  RegsToSpill[ResultPos++] = Reg;
708  }
709  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
710  LLVM_DEBUG(dbgs() << RegsToSpill.size()
711  << " registers to spill after remat.\n");
712 }
713 
714 //===----------------------------------------------------------------------===//
715 // Spilling
716 //===----------------------------------------------------------------------===//
717 
718 /// If MI is a load or store of StackSlot, it can be removed.
719 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
720  int FI = 0;
721  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
722  bool IsLoad = InstrReg;
723  if (!IsLoad)
724  InstrReg = TII.isStoreToStackSlot(*MI, FI);
725 
726  // We have a stack access. Is it the right register and slot?
727  if (InstrReg != Reg || FI != StackSlot)
728  return false;
729 
730  if (!IsLoad)
731  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
732 
733  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
734  LIS.RemoveMachineInstrFromMaps(*MI);
735  MI->eraseFromParent();
736 
737  if (IsLoad) {
738  ++NumReloadsRemoved;
739  --NumReloads;
740  } else {
741  ++NumSpillsRemoved;
742  --NumSpills;
743  }
744 
745  return true;
746 }
747 
748 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
750 // Dump the range of instructions from B to E with their slot indexes.
753  LiveIntervals const &LIS,
754  const char *const header,
755  unsigned VReg =0) {
756  char NextLine = '\n';
757  char SlotIndent = '\t';
758 
759  if (std::next(B) == E) {
760  NextLine = ' ';
761  SlotIndent = ' ';
762  }
763 
764  dbgs() << '\t' << header << ": " << NextLine;
765 
766  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
767  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
768 
769  // If a register was passed in and this instruction has it as a
770  // destination that is marked as an early clobber, print the
771  // early-clobber slot index.
772  if (VReg) {
773  MachineOperand *MO = I->findRegisterDefOperand(VReg);
774  if (MO && MO->isEarlyClobber())
775  Idx = Idx.getRegSlot(true);
776  }
777 
778  dbgs() << SlotIndent << Idx << '\t' << *I;
779  }
780 }
781 #endif
782 
783 /// foldMemoryOperand - Try folding stack slot references in Ops into their
784 /// instructions.
785 ///
786 /// @param Ops Operand indices from analyzeVirtReg().
787 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
788 /// @return True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  // ImpReg remembers an implicit operand's register so any leftover implicit
  // operands for it can be stripped from the folded instruction below.
  unsigned ImpReg = 0;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  // MIS tracks the span where new instructions may be inserted by folding,
  // so they can be registered with LiveIntervals afterwards.
  MachineInstrSpan MIS(MI, MI->getParent());

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    // Only physical, non-reserved registers need their live ranges fixed up.
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  // If the folded-away instruction was itself a recorded spill, drop it from
  // the hoisting candidates and the statistics.
  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Keep call site info in sync when replacing a call instruction.
  if (MI->isCall())
    MI->getMF()->updateCallSiteInfo(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      // Implicit operands are at the tail; stop at the first explicit one.
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

  // Update statistics: a folded copy whose first operand was the def counts
  // as a spill, otherwise as a reload; anything else is a folded access.
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}
904 
905 void InlineSpiller::insertReload(unsigned NewVReg,
906  SlotIndex Idx,
908  MachineBasicBlock &MBB = *MI->getParent();
909 
910  MachineInstrSpan MIS(MI, &MBB);
911  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
912  MRI.getRegClass(NewVReg), &TRI);
913 
914  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
915 
917  NewVReg));
918  ++NumReloads;
919 }
920 
921 /// Check if \p Def fully defines a VReg with an undefined value.
922 /// If that's the case, that means the value of VReg is actually
923 /// not relevant.
924 static bool isFullUndefDef(const MachineInstr &Def) {
925  if (!Def.isImplicitDef())
926  return false;
927  assert(Def.getNumOperands() == 1 &&
928  "Implicit def with more than one definition");
929  // We can say that the VReg defined by Def is undef, only if it is
930  // fully defined by Def. Otherwise, some of the lanes may not be
931  // undef and the value of the VReg matters.
932  return !Def.getOperand(0).getSubReg();
933 }
934 
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Track the inserted instruction(s) so their slot indexes can be registered.
  MachineInstrSpan MIS(MI, &MBB);
  bool IsRealSpill = true;
  if (isFullUndefDef(*MI)) {
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, std::next(MI), MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));
    IsRealSpill = false;
  } else
    TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                                "spill"));
  ++NumSpills;
  // A KILL standing in for an undef spill never touches memory, so it must
  // not participate in spill merging/hoisting.
  if (IsRealSpill)
    HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
}
962 
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    // Advance the iterator first: MI may be erased or have its operands
    // rewritten below, which would invalidate RegI.
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI->getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
      buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
      MBB->erase(MI);
      continue;
    }

    assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction: Ops collects every (instr, operand index) pair in
    // the bundle that refers to Reg, for rewriting below.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
        MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(*MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, *MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands to refer to the short-lived NewVReg.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        // NewVReg dies here unless the use is tied to a def.
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}
1063 
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    // The stack interval carries a single value number covering all spills.
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
  }

  // Finally delete the SnippetCopies.
  for (unsigned Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      // Advance before erasing so the iterator stays valid.
      MachineInstr &MI = *(RI++);
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}
1110 
/// spill - Main entry point: spill the live range being edited, sharing a
/// stack slot with all other descendants of the pre-split original register.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}
1138 
1139 /// Optimizations after all the reg selections and spills are done.
1140 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1141 
1142 /// When a spill is inserted, add the spill to MergeableSpills map.
1143 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1144  unsigned Original) {
1145  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1146  LiveInterval &OrigLI = LIS.getInterval(Original);
1147  // save a copy of LiveInterval in StackSlotToOrigLI because the original
1148  // LiveInterval may be cleared after all its references are spilled.
1149  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1150  auto LI = llvm::make_unique<LiveInterval>(OrigLI.reg, OrigLI.weight);
1151  LI->assign(OrigLI, Allocator);
1152  StackSlotToOrigLI[StackSlot] = std::move(LI);
1153  }
1154  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1155  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1156  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1157  MergeableSpills[MIdx].insert(&Spill);
1158 }
1159 
1160 /// When a spill is removed, remove the spill from MergeableSpills map.
1161 /// Return true if the spill is removed successfully.
1162 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1163  int StackSlot) {
1164  auto It = StackSlotToOrigLI.find(StackSlot);
1165  if (It == StackSlotToOrigLI.end())
1166  return false;
1167  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1168  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1169  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1170  return MergeableSpills[MIdx].erase(&Spill);
1171 }
1172 
1173 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1174 /// i.e., there should be a living sibling of OrigReg at the insert point.
1175 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1176  MachineBasicBlock &BB, unsigned &LiveReg) {
1177  SlotIndex Idx;
1178  unsigned OrigReg = OrigLI.reg;
1179  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
1180  if (MI != BB.end())
1181  Idx = LIS.getInstructionIndex(*MI);
1182  else
1183  Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
1184  SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1185  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1186 
1187  for (auto const SibReg : Siblings) {
1188  LiveInterval &LI = LIS.getInterval(SibReg);
1189  VNInfo *VNI = LI.getVNInfoAt(Idx);
1190  if (VNI) {
1191  LiveReg = SibReg;
1192  return true;
1193  }
1194  }
1195  return false;
1196 }
1197 
/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill saw, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with smaller SlotIndex.
  for (const auto CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      // Keep whichever of the two spills comes first in the block.
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  // Erase only after the loop above, so Spills is not mutated while iterated.
  for (const auto SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}
1225 
/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
  // For every node on the dominator tree with spill, walk up on the dominator
  // tree towards the Root node until it is reached. If there is other node
  // containing spill in the middle of the path, the previous spill saw will
  // be redundant and the node containing it will be removed. All the nodes on
  // the path starting from the first node with non-redundant spill to the Root
  // node will be added to the WorkSet, which will contain all the possible
  // locations where spills may be hoisted to after the loop below is done.
  for (const auto Spill : Spills) {
    MachineBasicBlock *Block = Spill->getParent();
    MachineDomTreeNode *Node = MDT[Block];
    MachineInstr *SpillToRm = nullptr;
    while (Node != RootIDomNode) {
      // If Node dominates Block, and it already contains a spill, the spill in
      // Block will be redundant.
      if (Node != MDT[Block] && SpillBBToSpill[Node]) {
        SpillToRm = SpillBBToSpill[MDT[Block]];
        break;
      /// If we see the Node already in WorkSet, the path from the Node to
      /// the Root node must already be traversed by another spill.
      /// Then no need to repeat.
      } else if (WorkSet.count(Node)) {
        break;
      } else {
        NodesOnPath.insert(Node);
      }
      Node = Node->getIDom();
    }
    if (SpillToRm) {
      SpillsToRm.push_back(SpillToRm);
    } else {
      // Add a BB containing the original spills to SpillsToKeep -- i.e.,
      // set the initial status before hoisting start. The value of BBs
      // containing original spills is set to 0, in order to descriminate
      // with BBs containing hoisted spills which will be inserted to
      // SpillsToKeep later during hoisting.
      SpillsToKeep[MDT[Block]] = 0;
      WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
    }
    NodesOnPath.clear();
  }

  // Sort the nodes in WorkSet in top-down order and save the nodes
  // in Orders. Orders will be used for hoisting in runHoistSpills.
  unsigned idx = 0;
  Orders.push_back(MDT.getBase().getNode(Root));
  do {
    MachineDomTreeNode *Node = Orders[idx++];
    const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (WorkSet.count(Child))
        Orders.push_back(Child);
    }
  } while (idx != Orders.size());
  assert(Orders.size() == WorkSet.size() &&
         "Orders have different size with WorkSet");

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++)
    LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
  LLVM_DEBUG(dbgs() << "\n");
#endif
}
1315 
/// Try to hoist spills according to BB hotness. The spills to removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
  // Visit order of dominator tree nodes.
  SmallVector<MachineDomTreeNode *, 32> Orders;
  // SpillsToKeep contains all the nodes where spills are to be inserted
  // during hoisting. If the spill to be inserted is an original spill
  // (not a hoisted one), the value of the map entry is 0. If the spill
  // is a hoisted spill, the value of the map entry is the VReg to be used
  // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from BB to the first spill inside of it.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  // The def of the original value dominates all equal spills; its block is
  // the root of the hoisting region.
  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
  // nodes set and the cost of all the spills inside those nodes.
  // The nodes set are the locations where spills are to be inserted
  // in the subtree of current node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  MapVector<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate Orders set in reverse order, which will be a bottom-up order
  // in the dominator tree. Once we visit a dom tree node, we know its
  // children have already been visited and the spill locations in the
  // subtrees of all the children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); RIt++) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in subtree of current node (*RIt) to
    // SpillsInSubTreeMap[*RIt].first.
    const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();
    unsigned NumChildren = Children.size();
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
        continue;
      // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
      // should be placed before getting the begin and end iterators of
      // SpillsInSubTreeMap[Child].first, or else the iterators may be
      // invalidated when SpillsInSubTreeMap[*RIt] is seen the first time
      // and the map grows and then the original buckets in the map are moved.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate to insert spill.
    unsigned LiveReg = 0;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias a little
    // to hoist the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (const auto SpillBB : SpillsInSubTree) {
        // When SpillBB is a BB contains original spill, insert the spill
        // to SpillsToRm.
        if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
            !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
  // save them to SpillsToIns.
  for (const auto Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}
1440 
/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when spill is inserted. These equal spills are originated
/// from the same defining instruction and are dominated by the instruction.
/// Before hoisting all the equal spills, redundant spills inside in the same
/// BB are first marked to be deleted. Then starting from the spills left, walk
/// up on the dominator tree towards the Root node where the define instruction
/// is located, mark the dominated spills to be deleted along the way and
/// collect the BB nodes on the path from non-dominated spills to the define
/// instruction into a WorkSet. The nodes in WorkSet are the candidate places
/// where we are considering to hoist the spills. We iterate the WorkSet in
/// bottom-up order, and for each node, we will decide whether to hoist spills
/// inside its subtree to that node. In this way, we can get benefit locally
/// even if hoisting all the equal spills to one cold place is impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<unsigned, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  // Group every virtual register with a def under its pre-split original,
  // so siblings (splits of the same original) can be found per stack slot.
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    unsigned Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));

    // Insert hoisted spills.
    for (auto const Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      unsigned LiveReg = Insert.second;
      MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
      TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI);
      LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto const RMEnt : SpillsToRm) {
      // Turn the spill into a KILL and strip dead implicit defs so
      // eliminateDeadDefs can delete it.
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->RemoveOperand(i - 1);
      }
    }
    Edit.eliminateDeadDefs(SpillsToRm, None, AA);
  }
}
1532 
1533 /// For VirtReg clone, the \p New register should have the same physreg or
1534 /// stackslot as the \p old register.
1535 void HoistSpillHelper::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
1536  if (VRM.hasPhys(Old))
1537  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1538  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1539  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1540  else
1541  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1542 }
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:77
Safe Stack instrumentation pass
Definition: SafeStack.cpp:899
const unsigned reg
Definition: LiveInterval.h:704
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:635
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
Definition: SlotIndexes.h:241
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:60
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds...
Definition: Compiler.h:473
static unsigned index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
void MergeValueInAsValue(const LiveRange &RHS, const VNInfo *RHSValNo, VNInfo *LHSValNo)
MergeValueInAsValue - Merge all of the segments of a specific val# in RHS into this live range as the...
bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Store the specified register of the given register class to the specified stack frame index...
VNInfoList::iterator vni_iterator
Definition: LiveInterval.h:216
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:385
vni_iterator vni_begin()
Definition: LiveInterval.h:219
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:675
MIBundleOperands - Iterate over all operands in a bundle of machine instructions. ...
void setIsUndef(bool Val=true)
void eliminateDeadDefs(SmallVectorImpl< MachineInstr *> &Dead, ArrayRef< unsigned > RegsBeingSpilled=None, AliasAnalysis *AA=nullptr)
eliminateDeadDefs - Try to delete machine instructions that are now dead (allDefsAreDead returns true...
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned Reg
unsigned getSubReg() const
Spiller interface.
Definition: Spiller.h:23
MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate machine basic b...
This class implements a map that also provides access to all stored values in a deterministic order...
Definition: MapVector.h:37
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
void setIsDead(bool Val=true)
VNInfo - Value Number Information.
Definition: LiveInterval.h:52
Determines the latest safe point in a block in which we can insert a split, spill or other instructio...
Definition: SplitKit.h:49
Callback methods for LiveRangeEdit owners.
Definition: LiveRangeEdit.h:48
VirtRegInfo analyzeVirtReg(unsigned Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
analyzeVirtReg - Analyze how the current instruction or bundle uses a virtual register.
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:80
MachineInstrSpan provides an interface to get an iteration range containing the instruction it was in...
bool isEarlyClobber() const
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
Hexagon Hardware Loops
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
Load the specified register of the given register class from the specified stack frame index...
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
const HexagonInstrInfo * TII
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:414
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
Result of a LiveRange query.
Definition: LiveInterval.h:89
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:411
PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI)
analyzePhysReg - Analyze how the current instruction or bundle uses a physical register.
MachineBasicBlock::iterator end()
defusechain_iterator - This class provides iterator support for machine operands in the function that...
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isFullCopy() const
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def...
Definition: SlotIndexes.h:254
Base class for the actual dominator tree node.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
const std::vector< DomTreeNodeBase * > & getChildren() const
unsigned getReg() const
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:111
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
unsigned getKillRegState(bool B)
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:528
void updateCallSiteInfo(const MachineInstr *Old, const MachineInstr *New=nullptr)
Update call sites info by deleting entry for Old call instruction.
TargetInstrInfo - Interface to description of machine instruction set.
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:408
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:822
#define P(N)
static LLVM_DUMP_METHOD void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, LiveIntervals const &LIS, const char *const header, unsigned VReg=0)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:140
bool isBundled() const
Return true if this instruction is part of a bundle.
Definition: MachineInstr.h:358
MachineInstrBuilder & UseMI
DomTreeNodeBase * getIDom() const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
iterator_range< pred_iterator > predecessors()
bool isCopy() const
bool isImplicitDef() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:52
bool isDebugInstr() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setIsKill(bool Val=true)
virtual ~Spiller()=0
static bool isStackSlot(unsigned Reg)
isStackSlot - Sometimes it is useful to be able to store a non-negative frame index in a variable th...
size_type size() const
Definition: SmallPtrSet.h:92
unsigned id
The ID number of this value.
Definition: LiveInterval.h:57
Basic Register Allocator
LiveInterval & getParent() const
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:297
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:417
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
static bool isFullUndefDef(const MachineInstr &Def)
Check if Def fully defines a VReg with an undefined value.
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:197
bool isDebugValue() const
MachineOperand class - Representation of each machine instruction operand.
MachineInstrBuilder MachineInstrBuilder & DefMI
static cl::opt< bool > DisableHoisting("disable-spill-hoist", cl::Hidden, cl::desc("Disable inline spill hoisting"))
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:374
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx...
Definition: LiveInterval.h:416
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:256
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Spiller * createInlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
Create and return a spiller that will insert spill code directly instead of deferring though VirtRegM...
unsigned getNumValNums() const
Definition: LiveInterval.h:300
Representation of each machine instruction.
Definition: MachineInstr.h:64
iterator begin() const
Definition: SmallPtrSet.h:396
void assign(const LiveRange &Other, BumpPtrAllocator &Allocator)
Copies values numbers and live segments from Other into this range.
Definition: LiveInterval.h:239
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:289
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:755
void setReg(unsigned Reg)
Change the register this operand corresponds to.
#define I(x, y, z)
Definition: MD5.cpp:58
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg)
isFullCopyOf - If MI is a COPY to or from Reg, return the other register, otherwise return 0...
iterator end() const
Definition: SmallPtrSet.h:401
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Remat - Information needed to rematerialize at a specific location.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isSpillable() const
isSpillable - Can this interval be spilled?
Definition: LiveInterval.h:804
iterator SkipPHIsLabelsAndDebug(iterator I)
Return the first instruction in MBB after I that is not a PHI, label or debug.
IRTranslator LLVM IR MI
Register getReg() const
getReg - Returns the register number.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object...
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:83
static cl::opt< bool > RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false), cl::Hidden, cl::desc("Restrict remat for statepoint operands"))
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
vni_iterator vni_end()
Definition: LiveInterval.h:220
bool isImplicit() const
MachineBasicBlock::iterator begin()
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1251