1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
41 #include "llvm/CodeGen/Spiller.h"
42 #include "llvm/CodeGen/StackMaps.h"
48 #include "llvm/Config/llvm-config.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
56 #include <cassert>
57 #include <iterator>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 #define DEBUG_TYPE "regalloc"
65 
66 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67 STATISTIC(NumSnippets, "Number of spilled snippets");
68 STATISTIC(NumSpills, "Number of spills inserted");
69 STATISTIC(NumSpillsRemoved, "Number of spills removed");
70 STATISTIC(NumReloads, "Number of reloads inserted");
71 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72 STATISTIC(NumFolded, "Number of folded stack accesses");
73 STATISTIC(NumFoldedLoads, "Number of folded loads");
74 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75 
76 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77  cl::desc("Disable inline spill hoisting"));
78 static cl::opt<bool>
79 RestrictStatepointRemat("restrict-statepoint-remat",
80  cl::init(false), cl::Hidden,
81  cl::desc("Restrict remat for statepoint operands"));
82 
83 namespace {
84 
85 class HoistSpillHelper : private LiveRangeEdit::Delegate {
86  MachineFunction &MF;
87  LiveIntervals &LIS;
88  LiveStacks &LSS;
89  MachineDominatorTree &MDT;
90  MachineLoopInfo &Loops;
91  VirtRegMap &VRM;
92  MachineRegisterInfo &MRI;
93  const TargetInstrInfo &TII;
94  const TargetRegisterInfo &TRI;
95  const MachineBlockFrequencyInfo &MBFI;
96 
97  InsertPointAnalysis IPA;
98 
99  // Map from StackSlot to the LiveInterval of the original register.
100  // Note the LiveInterval of the original register may have been deleted
101  // after it is spilled. We keep a copy here to track the range where
102  // spills can be moved.
103  std::unordered_map<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
104 
105  // Map from pair of (StackSlot and Original VNI) to a set of spills which
106  // have the same stackslot and have equal values defined by Original VNI.
107  // These spills are mergeable and are hoist candidates.
108  using MergeableSpillsMap =
109      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
110  MergeableSpillsMap MergeableSpills;
111 
112  /// This is the map from original register to a set containing all its
113  /// siblings. To hoist a spill to another BB, we need to find a live
114  /// sibling there and use it as the source of the new spill.
115  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
116 
117  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
118  MachineBasicBlock &BB, Register &LiveReg);
119 
120  void rmRedundantSpills(SmallPtrSet<MachineInstr *, 16> &Spills,
121                         SmallVectorImpl<MachineInstr *> &SpillsToRm,
122                         DenseMap<MachineDomTreeNode *, MachineInstr *>
123                             &SpillBBToSpill);
124 
125  void getVisitOrders(MachineBasicBlock *Root,
126                      SmallPtrSet<MachineInstr *, 16> &Spills,
127                      SmallVectorImpl<MachineDomTreeNode *> &Orders,
128                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
129                      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
130                      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
131 
132  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
133                      SmallPtrSet<MachineInstr *, 16> &Spills,
134                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
135                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
136 
137 public:
138  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
139  VirtRegMap &vrm)
140  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
141  LSS(pass.getAnalysis<LiveStacks>()),
142  MDT(pass.getAnalysis<MachineDominatorTree>()),
143  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
144  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
145  TRI(*mf.getSubtarget().getRegisterInfo()),
146  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
147  IPA(LIS, mf.getNumBlockIDs()) {}
148 
149  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
150  unsigned Original);
151  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
152  void hoistAllSpills();
153  void LRE_DidCloneVirtReg(Register, Register) override;
154 };
155 
156 class InlineSpiller : public Spiller {
157  MachineFunction &MF;
158  LiveIntervals &LIS;
159  LiveStacks &LSS;
160  MachineDominatorTree &MDT;
161  MachineLoopInfo &Loops;
162  VirtRegMap &VRM;
163  MachineRegisterInfo &MRI;
164  const TargetInstrInfo &TII;
165  const TargetRegisterInfo &TRI;
166  const MachineBlockFrequencyInfo &MBFI;
167 
168  // Variables that are valid during spill(), but used by multiple methods.
169  LiveRangeEdit *Edit;
170  LiveInterval *StackInt;
171  int StackSlot;
172  Register Original;
173 
174  // All registers to spill to StackSlot, including the main register.
175  SmallVector<Register, 8> RegsToSpill;
176 
177  // All COPY instructions to/from snippets.
178  // They are ignored since both operands refer to the same stack slot.
179  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
180 
181  // Values that failed to remat at some point.
182  SmallPtrSet<VNInfo*, 8> UsedValues;
183 
184  // Dead defs generated during spilling.
185  SmallVector<MachineInstr*, 8> DeadDefs;
186 
187  // Object that records spill information and performs the hoisting.
188  HoistSpillHelper HSpiller;
189 
190  // Live range weight calculator.
191  VirtRegAuxInfo &VRAI;
192 
193  ~InlineSpiller() override = default;
194 
195 public:
196  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
197  VirtRegAuxInfo &VRAI)
198  : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
199  LSS(Pass.getAnalysis<LiveStacks>()),
200  MDT(Pass.getAnalysis<MachineDominatorTree>()),
201  Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
202  MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
203  TRI(*MF.getSubtarget().getRegisterInfo()),
204  MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
205  HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
206 
207  void spill(LiveRangeEdit &) override;
208  void postOptimization() override;
209 
210 private:
211  bool isSnippet(const LiveInterval &SnipLI);
212  void collectRegsToSpill();
213 
214  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
215 
216  bool isSibling(Register Reg);
217  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
218  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
219 
220  void markValueUsed(LiveInterval*, VNInfo*);
221  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
222  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
223  void reMaterializeAll();
224 
225  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
226  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
227  MachineInstr *LoadMI = nullptr);
228  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
229  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
230 
231  void spillAroundUses(Register Reg);
232  void spillAll();
233 };
234 
235 } // end anonymous namespace
236 
237 Spiller::~Spiller() = default;
238 
239 void Spiller::anchor() {}
240 
241 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
242                                    MachineFunction &MF, VirtRegMap &VRM,
243  VirtRegAuxInfo &VRAI) {
244  return new InlineSpiller(Pass, MF, VRM, VRAI);
245 }
246 
247 //===----------------------------------------------------------------------===//
248 // Snippets
249 //===----------------------------------------------------------------------===//
250 
251 // When spilling a virtual register, we also spill any snippets it is connected
252 // to. The snippets are small live ranges that only have a single real use,
253 // leftovers from live range splitting. Spilling them enables memory operand
254 // folding or tightens the live range around the single use.
255 //
256 // This minimizes register pressure and maximizes the store-to-load distance for
257 // spill slots which can be important in tight loops.
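//
// As a rough illustration (hypothetical registers and frame index fi#0):
// once %Reg and its snippet share a stack slot, the snippet's single real use
//
//   %snip = FILL fi#0
//   %x = ADD %x, %snip
//
// may be folded by the target into a single memory-operand instruction, so
// %snip no longer ties up a register between the fill and the use.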
258 
259 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
260 /// otherwise return an empty Register().
261 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
262  if (!MI.isFullCopy())
263  return Register();
264  if (MI.getOperand(0).getReg() == Reg)
265  return MI.getOperand(1).getReg();
266  if (MI.getOperand(1).getReg() == Reg)
267  return MI.getOperand(0).getReg();
268  return Register();
269 }
270 
271 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
272  for (const MachineOperand &MO : MI.operands())
273  if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
274  LIS.getInterval(MO.getReg());
275 }
276 
277 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
278 /// It is assumed that SnipLI is a virtual register with the same original as
279 /// Edit->getReg().
280 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
281  Register Reg = Edit->getReg();
282 
283  // A snippet is a tiny live range with only a single instruction using it
284  // besides copies to/from Reg or spills/fills. We accept:
285  //
286  // %snip = COPY %Reg / FILL fi#
287  // %snip = USE %snip
288  // %Reg = COPY %snip / SPILL %snip, fi#
289  //
290  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
291  return false;
292 
293  MachineInstr *UseMI = nullptr;
294 
295  // Check that all uses satisfy our criteria.
296  for (MachineRegisterInfo::reg_instr_nodbg_iterator
297           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
298           E = MRI.reg_instr_nodbg_end();
299       RI != E;) {
300  MachineInstr &MI = *RI++;
301 
302  // Allow copies to/from Reg.
303  if (isFullCopyOf(MI, Reg))
304  continue;
305 
306  // Allow stack slot loads.
307  int FI;
308  if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
309  continue;
310 
311  // Allow stack slot stores.
312  if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
313  continue;
314 
315  // Allow a single additional instruction.
316  if (UseMI && &MI != UseMI)
317  return false;
318  UseMI = &MI;
319  }
320  return true;
321 }
322 
323 /// collectRegsToSpill - Collect live range snippets that only have a single
324 /// real use.
325 void InlineSpiller::collectRegsToSpill() {
326  Register Reg = Edit->getReg();
327 
328  // Main register always spills.
329  RegsToSpill.assign(1, Reg);
330  SnippetCopies.clear();
331 
332  // Snippets all have the same original, so there can't be any for an original
333  // register.
334  if (Original == Reg)
335  return;
336 
337  for (MachineInstr &MI :
338       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
339  Register SnipReg = isFullCopyOf(MI, Reg);
340  if (!isSibling(SnipReg))
341  continue;
342  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
343  if (!isSnippet(SnipLI))
344  continue;
345  SnippetCopies.insert(&MI);
346  if (isRegToSpill(SnipReg))
347  continue;
348  RegsToSpill.push_back(SnipReg);
349  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
350  ++NumSnippets;
351  }
352 }
353 
354 bool InlineSpiller::isSibling(Register Reg) {
355  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
356 }
357 
358 /// It is beneficial to hoist the spill to an earlier place in the same BB in
359 /// the following case:
360 /// There is an alternative def earlier in the same MBB.
361 /// Hoist the spill as far as possible in SpillMBB. This can ease
362 /// register pressure:
363 ///
364 /// x = def
365 /// y = use x
366 /// s = copy x
367 ///
368 /// Hoisting the spill of s to immediately after the def removes the
369 /// interference between x and y:
370 ///
371 /// x = def
372 /// spill x
373 /// y = use killed x
374 ///
375 /// This hoist only helps when the copy kills its source.
376 ///
377 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
378  MachineInstr &CopyMI) {
379  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
380 #ifndef NDEBUG
381  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
382  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
383 #endif
384 
385  Register SrcReg = CopyMI.getOperand(1).getReg();
386  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
387  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
388  LiveQueryResult SrcQ = SrcLI.Query(Idx);
389  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
390  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
391  return false;
392 
393  // Conservatively extend the stack slot range to the range of the original
394  // value. We may be able to do better with stack slot coloring by being more
395  // careful here.
396  assert(StackInt && "No stack slot assigned yet.");
397  LiveInterval &OrigLI = LIS.getInterval(Original);
398  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
399  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
400  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
401  << *StackInt << '\n');
402 
403  // We are going to spill SrcVNI immediately after its def, so clear out
404  // any later spills of the same value.
405  eliminateRedundantSpills(SrcLI, SrcVNI);
406 
407  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
408  MachineBasicBlock::iterator MII;
409  if (SrcVNI->isPHIDef())
410  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
411  else {
412  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
413  assert(DefMI && "Defining instruction disappeared");
414  MII = DefMI;
415  ++MII;
416  }
417  MachineInstrSpan MIS(MII, MBB);
418  // Insert spill without kill flag immediately after def.
419  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
420  MRI.getRegClass(SrcReg), &TRI);
421  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
422  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
423  getVDefInterval(MI, LIS);
424  --MII; // Point to store instruction.
425  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
426 
427  // If only one store instruction is required for the spill, add it to the
428  // mergeable list. In X86 AMX, two instructions are required to store, so
429  // we disable the merge for that case.
430  if (MIS.begin() == MII)
431  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
432  ++NumSpills;
433  return true;
434 }
435 
436 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
437 /// redundant spills of this value in SLI.reg and sibling copies.
438 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
439  assert(VNI && "Missing value");
440  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
441  WorkList.push_back(std::make_pair(&SLI, VNI));
442  assert(StackInt && "No stack slot assigned yet.");
443 
444  do {
445  LiveInterval *LI;
446  std::tie(LI, VNI) = WorkList.pop_back_val();
447  Register Reg = LI->reg();
448  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
449  << VNI->def << " in " << *LI << '\n');
450 
451  // Regs to spill are taken care of.
452  if (isRegToSpill(Reg))
453  continue;
454 
455  // Add all of VNI's live range to StackInt.
456  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
457  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
458 
459  // Find all spills and copies of VNI.
460  for (MachineInstr &MI :
461       llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
462  if (!MI.isCopy() && !MI.mayStore())
463  continue;
464  SlotIndex Idx = LIS.getInstructionIndex(MI);
465  if (LI->getVNInfoAt(Idx) != VNI)
466  continue;
467 
468  // Follow sibling copies down the dominator tree.
469  if (Register DstReg = isFullCopyOf(MI, Reg)) {
470  if (isSibling(DstReg)) {
471  LiveInterval &DstLI = LIS.getInterval(DstReg);
472  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
473  assert(DstVNI && "Missing defined value");
474  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
475  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
476  }
477  continue;
478  }
479 
480  // Erase spills.
481  int FI;
482  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
483  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
484  // eliminateDeadDefs won't normally remove stores, so switch opcode.
485  MI.setDesc(TII.get(TargetOpcode::KILL));
486  DeadDefs.push_back(&MI);
487  ++NumSpillsRemoved;
488  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
489  --NumSpills;
490  }
491  }
492  } while (!WorkList.empty());
493 }
494 
495 //===----------------------------------------------------------------------===//
496 // Rematerialization
497 //===----------------------------------------------------------------------===//
498 
499 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
500 /// instruction cannot be eliminated. See through snippet copies.
501 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
502  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
503  WorkList.push_back(std::make_pair(LI, VNI));
504  do {
505  std::tie(LI, VNI) = WorkList.pop_back_val();
506  if (!UsedValues.insert(VNI).second)
507  continue;
508 
509  if (VNI->isPHIDef()) {
510  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
511  for (MachineBasicBlock *P : MBB->predecessors()) {
512  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
513  if (PVNI)
514  WorkList.push_back(std::make_pair(LI, PVNI));
515  }
516  continue;
517  }
518 
519  // Follow snippet copies.
520  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
521  if (!SnippetCopies.count(MI))
522  continue;
523  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
524  assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
525  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
526  assert(SnipVNI && "Snippet undefined before copy");
527  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
528  } while (!WorkList.empty());
529 }
530 
531 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
532  MachineInstr &MI) {
533  if (!RestrictStatepointRemat)
534    return true;
535  // Here's a quick explanation of the problem we're trying to handle here:
536  // * There are some pseudo instructions with more vreg uses than there are
537  // physical registers on the machine.
538  // * This is normally handled by spilling the vreg, and folding the reload
539  // into the user instruction. (Thus decreasing the number of used vregs
540  // until the remainder can be assigned to physregs.)
541  // * However, since we may try to spill vregs in any order, we can end up
542  // trying to spill each operand to the instruction, and then rematting it
543  // instead. When that happens, the new live intervals (for the remats) are
544  // expected to be trivially assignable (i.e. RS_Done). However, since we
545  // may have more remats than physregs, we're guaranteed to fail to assign
546  // one.
547  // At the moment, we only handle this for STATEPOINTs since they're the only
548  // pseudo op where we've seen this. If we start seeing other instructions
549  // with the same problem, we need to revisit this.
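  // As a rough, hypothetical illustration: a STATEPOINT carrying 40 virtual
  // register operands on a target with 16 general-purpose registers cannot
  // have every operand rematerialized into a fresh vreg, because each remat
  // creates a tiny interval the allocator expects to assign directly, and
  // there are not enough physregs for all of them at the statepoint.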
550  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
551  return true;
552  // For STATEPOINTs we allow re-materialization for fixed arguments only hoping
553  // that number of physical registers is enough to cover all fixed arguments.
554  // If it is not true we need to revisit it.
555  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
556  EndIdx = MI.getNumOperands();
557  Idx < EndIdx; ++Idx) {
558  MachineOperand &MO = MI.getOperand(Idx);
559  if (MO.isReg() && MO.getReg() == VReg)
560  return false;
561  }
562  return true;
563 }
564 
565 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
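///
/// As an illustration (hypothetical opcode and value): if the parent value was
/// defined by a trivially rematerializable instruction such as
/// "%orig = MOV32ri 42", re-emitting that definition right before MI is
/// usually cheaper than reloading the value from the stack slot.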
566 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
567  // Analyze instruction
568  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
569  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
570 
571  if (!RI.Reads)
572  return false;
573 
574  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
575  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
576 
577  if (!ParentVNI) {
578  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
579  for (MachineOperand &MO : MI.operands())
580  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
581  MO.setIsUndef();
582  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
583  return true;
584  }
585 
586  if (SnippetCopies.count(&MI))
587  return false;
588 
589  LiveInterval &OrigLI = LIS.getInterval(Original);
590  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
591  LiveRangeEdit::Remat RM(ParentVNI);
592  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
593 
594  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
595  markValueUsed(&VirtReg, ParentVNI);
596  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
597  return false;
598  }
599 
600  // If the instruction also writes VirtReg.reg, it had better not require the
601  // same register for uses and defs.
602  if (RI.Tied) {
603  markValueUsed(&VirtReg, ParentVNI);
604  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
605  return false;
606  }
607 
608  // Before rematerializing into a register for a single instruction, try to
609  // fold a load into the instruction. That avoids allocating a new register.
610  if (RM.OrigMI->canFoldAsLoad() &&
611  foldMemoryOperand(Ops, RM.OrigMI)) {
612  Edit->markRematerialized(RM.ParentVNI);
613  ++NumFoldedLoads;
614  return true;
615  }
616 
617  // If we can't guarantee that we'll be able to actually assign the new vreg,
618  // we can't remat.
619  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
620  markValueUsed(&VirtReg, ParentVNI);
621  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
622  return false;
623  }
624 
625  // Allocate a new register for the remat.
626  Register NewVReg = Edit->createFrom(Original);
627 
628  // Finally we can rematerialize OrigMI before MI.
629  SlotIndex DefIdx =
630  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
631 
632  // We take the DebugLoc from MI, since OrigMI may be attributed to a
633  // different source location.
634  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
635  NewMI->setDebugLoc(MI.getDebugLoc());
636 
637  (void)DefIdx;
638  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
639  << *LIS.getInstructionFromIndex(DefIdx));
640 
641  // Replace operands
642  for (const auto &OpPair : Ops) {
643  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
644  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
645  MO.setReg(NewVReg);
646  MO.setIsKill();
647  }
648  }
649  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
650 
651  ++NumRemats;
652  return true;
653 }
654 
655 /// reMaterializeAll - Try to rematerialize as many uses as possible,
656 /// and trim the live ranges after.
657 void InlineSpiller::reMaterializeAll() {
658  if (!Edit->anyRematerializable())
659  return;
660 
661  UsedValues.clear();
662 
663  // Try to remat before all uses of snippets.
664  bool anyRemat = false;
665  for (Register Reg : RegsToSpill) {
666  LiveInterval &LI = LIS.getInterval(Reg);
667  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
668  // Debug values are not allowed to affect codegen.
669  if (MI.isDebugValue())
670  continue;
671 
672  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
673  "instruction that isn't a DBG_VALUE");
674 
675  anyRemat |= reMaterializeFor(LI, MI);
676  }
677  }
678  if (!anyRemat)
679  return;
680 
681  // Remove any values that were completely rematted.
682  for (Register Reg : RegsToSpill) {
683  LiveInterval &LI = LIS.getInterval(Reg);
684  for (VNInfo *VNI : LI.vnis()) {
685  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
686  continue;
687  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
688  MI->addRegisterDead(Reg, &TRI);
689  if (!MI->allDefsAreDead())
690  continue;
691  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
692  DeadDefs.push_back(MI);
693  }
694  }
695 
696  // Eliminate dead code after remat. Note that some snippet copies may be
697  // deleted here.
698  if (DeadDefs.empty())
699  return;
700  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
701  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
702 
703  // LiveRangeEdit::eliminateDeadDef is used to remove dead defining instructions
704  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
705  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
706  // removed, PHI VNIs are still left in the LiveInterval.
707  // So to get rid of an unused reg, we need to check whether it has any non-dbg
708  // references instead of whether it has a non-empty interval.
709  unsigned ResultPos = 0;
710  for (Register Reg : RegsToSpill) {
711  if (MRI.reg_nodbg_empty(Reg)) {
712  Edit->eraseVirtReg(Reg);
713  continue;
714  }
715 
716  assert(LIS.hasInterval(Reg) &&
717  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
718  "Empty and not used live-range?!");
719 
720  RegsToSpill[ResultPos++] = Reg;
721  }
722  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
723  LLVM_DEBUG(dbgs() << RegsToSpill.size()
724  << " registers to spill after remat.\n");
725 }
726 
727 //===----------------------------------------------------------------------===//
728 // Spilling
729 //===----------------------------------------------------------------------===//
730 
731 /// If MI is a load or store of StackSlot, it can be removed.
732 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
733  int FI = 0;
734  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
735  bool IsLoad = InstrReg;
736  if (!IsLoad)
737  InstrReg = TII.isStoreToStackSlot(*MI, FI);
738 
739  // We have a stack access. Is it the right register and slot?
740  if (InstrReg != Reg || FI != StackSlot)
741  return false;
742 
743  if (!IsLoad)
744  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
745 
746  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
747  LIS.RemoveMachineInstrFromMaps(*MI);
748  MI->eraseFromParent();
749 
750  if (IsLoad) {
751  ++NumReloadsRemoved;
752  --NumReloads;
753  } else {
754  ++NumSpillsRemoved;
755  --NumSpills;
756  }
757 
758  return true;
759 }
760 
761 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
763 // Dump the range of instructions from B to E with their slot indexes.
764 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
765                                                MachineBasicBlock::iterator E,
766                                                LiveIntervals const &LIS,
767  const char *const header,
768  Register VReg = Register()) {
769  char NextLine = '\n';
770  char SlotIndent = '\t';
771 
772  if (std::next(B) == E) {
773  NextLine = ' ';
774  SlotIndent = ' ';
775  }
776 
777  dbgs() << '\t' << header << ": " << NextLine;
778 
779  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
780  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
781 
782  // If a register was passed in and this instruction has it as a
783  // destination that is marked as an early clobber, print the
784  // early-clobber slot index.
785  if (VReg) {
786  MachineOperand *MO = I->findRegisterDefOperand(VReg);
787  if (MO && MO->isEarlyClobber())
788  Idx = Idx.getRegSlot(true);
789  }
790 
791  dbgs() << SlotIndent << Idx << '\t' << *I;
792  }
793 }
794 #endif
795 
796 /// foldMemoryOperand - Try folding stack slot references in Ops into their
797 /// instructions.
798 ///
799 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
800 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
801 /// @return True on success.
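///
/// As an illustration only (x86-style pseudo-MIR with hypothetical registers),
/// a reload feeding a use such as:
///   %tmp = MOV32rm %stack.0
///   %dst = ADD32rr %src, %tmp
/// may be folded by the target into a single memory-operand instruction:
///   %dst = ADD32rm %src, %stack.0
/// Whether a particular fold is legal is decided by TII.foldMemoryOperand.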
802 bool InlineSpiller::
803 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
804  MachineInstr *LoadMI) {
805  if (Ops.empty())
806  return false;
807  // Don't attempt folding in bundles.
808  MachineInstr *MI = Ops.front().first;
809  if (Ops.back().first != MI || MI->isBundled())
810  return false;
811 
812  bool WasCopy = MI->isCopy();
813  Register ImpReg;
814 
815  // TII::foldMemoryOperand will do what we need here for statepoint
816  // (fold load into use and remove corresponding def). We will replace
817  // uses of removed def with loads (spillAroundUses).
818  // For that to work we need to untie def and use to pass it through
819  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
820  // fold them.
821  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
822 
823  // Spill subregs if the target allows it.
824  // We always want to spill subregs for stackmap/patchpoint pseudos.
825  bool SpillSubRegs = TII.isSubregFoldable() ||
826  MI->getOpcode() == TargetOpcode::STATEPOINT ||
827  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
828  MI->getOpcode() == TargetOpcode::STACKMAP;
829 
830  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
831  // operands.
832  SmallVector<unsigned, 8> FoldOps;
833  for (const auto &OpPair : Ops) {
834  unsigned Idx = OpPair.second;
835  assert(MI == OpPair.first && "Instruction conflict during operand folding");
836  MachineOperand &MO = MI->getOperand(Idx);
837 
838  // No point restoring an undef read, and we'll produce an invalid live
839  // interval.
840  // TODO: Is this really the correct way to handle undef tied uses?
841  if (MO.isUse() && !MO.readsReg() && !MO.isTied())
842  continue;
843 
844  if (MO.isImplicit()) {
845  ImpReg = MO.getReg();
846  continue;
847  }
848 
849  if (!SpillSubRegs && MO.getSubReg())
850  return false;
851  // We cannot fold a load instruction into a def.
852  if (LoadMI && MO.isDef())
853  return false;
854  // Tied use operands should not be passed to foldMemoryOperand.
855  if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
856  FoldOps.push_back(Idx);
857  }
858 
859  // If we only have implicit uses, we won't be able to fold that.
860  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
861  if (FoldOps.empty())
862  return false;
863 
864  MachineInstrSpan MIS(MI, MI->getParent());
865 
866  SmallVector<std::pair<unsigned, unsigned> > TiedOps;
867  if (UntieRegs)
868  for (unsigned Idx : FoldOps) {
869  MachineOperand &MO = MI->getOperand(Idx);
870  if (!MO.isTied())
871  continue;
872  unsigned Tied = MI->findTiedOperandIdx(Idx);
873  if (MO.isUse())
874  TiedOps.emplace_back(Tied, Idx);
875  else {
876  assert(MO.isDef() && "Tied to not use and def?");
877  TiedOps.emplace_back(Idx, Tied);
878  }
879  MI->untieRegOperand(Idx);
880  }
881 
882  MachineInstr *FoldMI =
883  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
884  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
885  if (!FoldMI) {
886  // Re-tie operands.
887  for (auto Tied : TiedOps)
888  MI->tieOperands(Tied.first, Tied.second);
889  return false;
890  }
891 
892  // Remove LIS for any dead defs in the original MI not in FoldMI.
893  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
894  if (!MO->isReg())
895  continue;
896  Register Reg = MO->getReg();
897    if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
898      continue;
899  }
900  // Skip non-Defs, including undef uses and internal reads.
901  if (MO->isUse())
902  continue;
903  PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
904  if (RI.FullyDefined)
905  continue;
906  // FoldMI does not define this physreg. Remove the LI segment.
907  assert(MO->isDead() && "Cannot fold physreg def");
908  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
909  LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
910  }
911 
912  int FI;
913  if (TII.isStoreToStackSlot(*MI, FI) &&
914  HSpiller.rmFromMergeableSpills(*MI, FI))
915  --NumSpills;
916  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
917  // Update the call site info.
918  if (MI->isCandidateForCallSiteEntry())
919  MI->getMF()->moveCallSiteInfo(MI, FoldMI);
920 
921  // If we've folded a store into an instruction labelled with debug-info,
922  // record a substitution from the old operand to the memory operand. Handle
923  // the simple common case where operand 0 is the one being folded, plus when
924  // the destination operand is also a tied def. More values could be
925  // substituted / preserved with more analysis.
926  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
927  // Helper lambda.
928  auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
929  // Substitute old operand zero to the new instructions memory operand.
930  unsigned OldOperandNum = Ops[0].second;
931  unsigned NewNum = FoldMI->getDebugInstrNum();
932  unsigned OldNum = MI->getDebugInstrNum();
933  MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
934                                {NewNum, MachineFunction::DebugOperandMemNumber});
935  };
936 
937  const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
938  if (Ops.size() == 1 && Op0.isDef()) {
939  MakeSubstitution();
940  } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
941  Op0.getReg() == MI->getOperand(1).getReg()) {
942  MakeSubstitution();
943  }
944  } else if (MI->peekDebugInstrNum()) {
945  // This is a debug-labelled instruction, but the operand being folded isn't
946  // at operand zero. Most likely this means it's a load being folded in.
947  // Substitute any register defs from operand zero up to the one being
948  // folded -- past that point, we don't know what the new operand indexes
949  // will be.
950  MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
951  }
952 
953  MI->eraseFromParent();
954 
955  // Insert any new instructions other than FoldMI into the LIS maps.
956  assert(!MIS.empty() && "Unexpected empty span of instructions!");
957  for (MachineInstr &MI : MIS)
958  if (&MI != FoldMI)
959      LIS.InsertMachineInstrInMaps(MI);
960 
961  // TII.foldMemoryOperand may have left some implicit operands on the
962  // instruction. Strip them.
963  if (ImpReg)
964  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
965  MachineOperand &MO = FoldMI->getOperand(i - 1);
966  if (!MO.isReg() || !MO.isImplicit())
967  break;
968  if (MO.getReg() == ImpReg)
969  FoldMI->removeOperand(i - 1);
970  }
971 
972  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
973  "folded"));
974 
975  if (!WasCopy)
976  ++NumFolded;
977  else if (Ops.front().second == 0) {
978  ++NumSpills;
979  // If only one store instruction is required for the spill, add it to the
980  // mergeable list. In X86 AMX, two instructions are required to store, so
981  // we disable the merge for that case.
982  if (std::distance(MIS.begin(), MIS.end()) <= 1)
983  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
984  } else
985  ++NumReloads;
986  return true;
987 }
988 
989 void InlineSpiller::insertReload(Register NewVReg,
990                                  SlotIndex Idx,
991                                  MachineBasicBlock::iterator MI) {
992  MachineBasicBlock &MBB = *MI->getParent();
993 
994  MachineInstrSpan MIS(MI, &MBB);
995  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
996  MRI.getRegClass(NewVReg), &TRI);
997 
998  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
999 
1000  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
1001  NewVReg));
1002  ++NumReloads;
1003 }
1004 
1005 /// Check if \p Def fully defines a VReg with an undefined value.
1006 /// If that's the case, that means the value of VReg is actually
1007 /// not relevant.
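/// For example, spilling a register defined by a full "%v = IMPLICIT_DEF" is
/// pointless, so insertSpill emits a KILL instead of a store; an implicit def
/// of only a subregister, however, leaves the other lanes meaningful, so the
/// spill must be kept.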
1008 static bool isRealSpill(const MachineInstr &Def) {
1009  if (!Def.isImplicitDef())
1010  return true;
1011  assert(Def.getNumOperands() == 1 &&
1012  "Implicit def with more than one definition");
1013  // We can say that the VReg defined by Def is undef, only if it is
1014  // fully defined by Def. Otherwise, some of the lanes may not be
1015  // undef and the value of the VReg matters.
1016  return Def.getOperand(0).getSubReg();
1017 }
1018 
1019 /// insertSpill - Insert a spill of NewVReg after MI.
1020 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1021                                 MachineBasicBlock::iterator MI) {
1022  // Spills are not terminators, so inserting spills after terminators will
1023  // violate invariants in MachineVerifier.
1024  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1025  MachineBasicBlock &MBB = *MI->getParent();
1026 
1027  MachineInstrSpan MIS(MI, &MBB);
1028  MachineBasicBlock::iterator SpillBefore = std::next(MI);
1029  bool IsRealSpill = isRealSpill(*MI);
1030 
1031  if (IsRealSpill)
1032  TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1033  MRI.getRegClass(NewVReg), &TRI);
1034  else
1035  // Don't spill undef value.
1036  // Anything works for undef, in particular keeping the memory
1037  // uninitialized is a viable option and it saves code size and
1038  // run time.
1039  BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1040  .addReg(NewVReg, getKillRegState(isKill));
1041 
1042  MachineBasicBlock::iterator Spill = std::next(MI);
1043  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1044  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1045  getVDefInterval(MI, LIS);
1046 
1047  LLVM_DEBUG(
1048  dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1049  ++NumSpills;
1050  // If only one store instruction is required for the spill, add it to the
1051  // mergeable list. In X86 AMX, two instructions are required to store, so
1052  // we disable the merge for that case.
1053  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1054  HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1055 }
1056 
1057 /// spillAroundUses - insert spill code around each use of Reg.
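/// Roughly, in illustrative pseudo-MIR (stack slot %stack.N and operands are
/// hypothetical), a use that also rewrites the register becomes:
///
///   %new = RELOAD %stack.N
///   %new = USE %new
///   SPILL %new, %stack.N
///
/// unless the stack access can be coalesced or folded away first.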
1058 void InlineSpiller::spillAroundUses(Register Reg) {
1059  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1060  LiveInterval &OldLI = LIS.getInterval(Reg);
1061 
1062  // Iterate over instructions using Reg.
1063  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
1064    // Debug values are not allowed to affect codegen.
1065  if (MI.isDebugValue()) {
1066  // Modify DBG_VALUE now that the value is in a spill slot.
1067  MachineBasicBlock *MBB = MI.getParent();
1068  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
1069  buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
1070  MBB->erase(MI);
1071  continue;
1072  }
1073 
1074  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
1075  "instruction that isn't a DBG_VALUE");
1076 
1077  // Ignore copies to/from snippets. We'll delete them.
1078  if (SnippetCopies.count(&MI))
1079  continue;
1080 
1081  // Stack slot accesses may coalesce away.
1082  if (coalesceStackAccess(&MI, Reg))
1083  continue;
1084 
1085  // Analyze instruction.
1086    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1087    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
1088 
1089  // Find the slot index where this instruction reads and writes OldLI.
1090  // This is usually the def slot, except for tied early clobbers.
1091    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
1092    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1093  if (SlotIndex::isSameInstr(Idx, VNI->def))
1094  Idx = VNI->def;
1095 
1096  // Check for a sibling copy.
1097  Register SibReg = isFullCopyOf(MI, Reg);
1098  if (SibReg && isSibling(SibReg)) {
1099  // This may actually be a copy between snippets.
1100  if (isRegToSpill(SibReg)) {
1101  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
1102  SnippetCopies.insert(&MI);
1103  continue;
1104  }
1105  if (RI.Writes) {
1106  if (hoistSpillInsideBB(OldLI, MI)) {
1107  // This COPY is now dead, the value is already in the stack slot.
1108  MI.getOperand(0).setIsDead();
1109  DeadDefs.push_back(&MI);
1110  continue;
1111  }
1112  } else {
1113  // This is a reload for a sib-reg copy. Drop spills downstream.
1114  LiveInterval &SibLI = LIS.getInterval(SibReg);
1115  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1116  // The COPY will fold to a reload below.
1117  }
1118  }
1119 
1120  // Attempt to fold memory ops.
1121  if (foldMemoryOperand(Ops))
1122  continue;
1123 
1124  // Create a new virtual register for spill/fill.
1125  // FIXME: Infer regclass from instruction alone.
1126  Register NewVReg = Edit->createFrom(Reg);
1127 
1128  if (RI.Reads)
1129  insertReload(NewVReg, Idx, &MI);
1130 
1131  // Rewrite instruction operands.
1132  bool hasLiveDef = false;
1133  for (const auto &OpPair : Ops) {
1134  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1135  MO.setReg(NewVReg);
1136  if (MO.isUse()) {
1137  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1138  MO.setIsKill();
1139  } else {
1140  if (!MO.isDead())
1141  hasLiveDef = true;
1142  }
1143  }
1144  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');
1145 
1146  // FIXME: Use a second vreg if instruction has no tied ops.
1147  if (RI.Writes)
1148  if (hasLiveDef)
1149  insertSpill(NewVReg, true, &MI);
1150  }
1151 }
1152 
1153 /// spillAll - Spill all registers remaining after rematerialization.
1154 void InlineSpiller::spillAll() {
1155  // Update LiveStacks now that we are committed to spilling.
1156  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1157  StackSlot = VRM.assignVirt2StackSlot(Original);
1158  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1159  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1160  } else
1161  StackInt = &LSS.getInterval(StackSlot);
1162 
1163  if (Original != Edit->getReg())
1164  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1165 
1166  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1167  for (Register Reg : RegsToSpill)
1168  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1169  StackInt->getValNumInfo(0));
1170  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1171 
1172  // Spill around uses of all RegsToSpill.
1173  for (Register Reg : RegsToSpill)
1174  spillAroundUses(Reg);
1175 
1176  // Hoisted spills may cause dead code.
1177  if (!DeadDefs.empty()) {
1178  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1179  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
1180  }
1181 
1182  // Finally delete the SnippetCopies.
1183  for (Register Reg : RegsToSpill) {
1184    for (MachineInstr &MI :
1185         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
1186      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1187      // FIXME: Do this with a LiveRangeEdit callback.
1188      LIS.RemoveMachineInstrFromMaps(MI);
1189      MI.eraseFromParent();
1190  }
1191  }
1192 
1193  // Delete all spilled registers.
1194  for (Register Reg : RegsToSpill)
1195  Edit->eraseVirtReg(Reg);
1196 }
1197 
1198 void InlineSpiller::spill(LiveRangeEdit &edit) {
1199  ++NumSpilledRanges;
1200  Edit = &edit;
1201  assert(!Register::isStackSlot(edit.getReg()) &&
1202  "Trying to spill a stack slot.");
1203  // Share a stack slot among all descendants of Original.
1204  Original = VRM.getOriginal(edit.getReg());
1205  StackSlot = VRM.getStackSlot(Original);
1206  StackInt = nullptr;
1207 
1208  LLVM_DEBUG(dbgs() << "Inline spilling "
1209                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1210                    << ':' << edit.getParent() << "\nFrom original "
1211  << printReg(Original) << '\n');
1212  assert(edit.getParent().isSpillable() &&
1213  "Attempting to spill already spilled value.");
1214  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1215 
1216  collectRegsToSpill();
1217  reMaterializeAll();
1218 
1219  // Remat may handle everything.
1220  if (!RegsToSpill.empty())
1221  spillAll();
1222 
1223  Edit->calculateRegClassAndHint(MF, VRAI);
1224 }
1225 
1226 /// Optimizations after all the reg selections and spills are done.
1227 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1228 
1229 /// When a spill is inserted, add the spill to MergeableSpills map.
1230 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1231  unsigned Original) {
1232  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1233  LiveInterval &OrigLI = LIS.getInterval(Original);
1234  // save a copy of LiveInterval in StackSlotToOrigLI because the original
1235  // LiveInterval may be cleared after all its references are spilled.
1236  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1237  auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1238  LI->assign(OrigLI, Allocator);
1239  StackSlotToOrigLI[StackSlot] = std::move(LI);
1240  }
1241  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1242  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1243  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1244  MergeableSpills[MIdx].insert(&Spill);
1245 }
1246 
1247 /// When a spill is removed, remove the spill from MergeableSpills map.
1248 /// Return true if the spill is removed successfully.
1249 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1250  int StackSlot) {
1251  auto It = StackSlotToOrigLI.find(StackSlot);
1252  if (It == StackSlotToOrigLI.end())
1253  return false;
1254  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1255  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1256  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1257  return MergeableSpills[MIdx].erase(&Spill);
1258 }
1259 
1260 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1261 /// i.e., there should be a live sibling of OrigReg at the insert point.
1262 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1263  MachineBasicBlock &BB, Register &LiveReg) {
1264  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1265  // The original def could be after the last insert point in the root block,
1266  // in which case we can't hoist to it.
1267  if (Idx < OrigVNI.def) {
1268  // TODO: We could do better here. If LI is not live in the landing pad,
1269  // we could hoist the spill after the LIP.
1270  LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1271  return false;
1272  }
1273  Register OrigReg = OrigLI.reg();
1274  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1275  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1276 
1277  for (const Register &SibReg : Siblings) {
1278  LiveInterval &LI = LIS.getInterval(SibReg);
1279  VNInfo *VNI = LI.getVNInfoAt(Idx);
1280  if (VNI) {
1281  LiveReg = SibReg;
1282  return true;
1283  }
1284  }
1285  return false;
1286 }
1287 
1288 /// Remove redundant spills in the same BB. Save those redundant spills in
1289 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1290 void HoistSpillHelper::rmRedundantSpills(
1291    SmallPtrSet<MachineInstr *, 16> &Spills,
1292    SmallVectorImpl<MachineInstr *> &SpillsToRm,
1293    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1294  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1295  // another spill inside. If a BB contains more than one spill, only keep the
1296  // earlier spill with the smaller SlotIndex.
1297  for (auto *const CurrentSpill : Spills) {
1298  MachineBasicBlock *Block = CurrentSpill->getParent();
1299  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1300  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1301  if (PrevSpill) {
1302  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1303  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1304  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1305  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1306  SpillsToRm.push_back(SpillToRm);
1307  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1308  } else {
1309  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1310  }
1311  }
1312  for (auto *const SpillToRm : SpillsToRm)
1313  Spills.erase(SpillToRm);
1314 }
1315 
1316 /// Starting from \p Root find a top-down traversal order of the dominator
1317 /// tree to visit all basic blocks containing the elements of \p Spills.
1318 /// Redundant spills will be found and put into \p SpillsToRm at the same
1319 /// time. \p SpillBBToSpill will be populated as part of the process and
1320 /// maps a basic block to the first store occurring in the basic block.
1321 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1322 void HoistSpillHelper::getVisitOrders(
1323    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1324    SmallVectorImpl<MachineDomTreeNode *> &Orders,
1325    SmallVectorImpl<MachineInstr *> &SpillsToRm,
1326    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1327    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1328  // The set contains all the possible BB nodes to which we may hoist
1329  // original spills.
1330  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1331  // Save the BB nodes on the path from the first BB node containing
1332  // non-redundant spill to the Root node.
1333  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1334  // All the spills to be hoisted must originate from a single def instruction
1335  // to the OrigReg. It means the def instruction should dominate all the spills
1336  // to be hoisted. We choose the BB where the def instruction is located as
1337  // the Root.
1338  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1339  // For every node on the dominator tree with a spill, walk up the dominator
1340  // tree towards the Root node until it is reached. If there is another node
1341  // containing a spill in the middle of the path, the previously seen spill will
1342  // be redundant and the node containing it will be removed. All the nodes on
1343  // the path starting from the first node with non-redundant spill to the Root
1344  // node will be added to the WorkSet, which will contain all the possible
1345  // locations where spills may be hoisted to after the loop below is done.
1346  for (auto *const Spill : Spills) {
1347  MachineBasicBlock *Block = Spill->getParent();
1348  MachineDomTreeNode *Node = MDT[Block];
1349  MachineInstr *SpillToRm = nullptr;
1350  while (Node != RootIDomNode) {
1351  // If Node dominates Block, and it already contains a spill, the spill in
1352  // Block will be redundant.
1353  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1354  SpillToRm = SpillBBToSpill[MDT[Block]];
1355  break;
1356  /// If we see the Node already in WorkSet, the path from the Node to
1357  /// the Root node must already be traversed by another spill.
1358  /// Then no need to repeat.
1359  } else if (WorkSet.count(Node)) {
1360  break;
1361  } else {
1362  NodesOnPath.insert(Node);
1363  }
1364  Node = Node->getIDom();
1365  }
1366  if (SpillToRm) {
1367  SpillsToRm.push_back(SpillToRm);
1368  } else {
1369  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1370  // set the initial status before hoisting starts. The value for BBs
1371  // containing original spills is set to 0, in order to discriminate
1372  // them from BBs containing hoisted spills, which will be inserted into
1373  // SpillsToKeep later during hoisting.
1374  SpillsToKeep[MDT[Block]] = 0;
1375  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1376  }
1377  NodesOnPath.clear();
1378  }
1379 
1380  // Sort the nodes in WorkSet in top-down order and save the nodes
1381  // in Orders. Orders will be used for hoisting in runHoistSpills.
1382  unsigned idx = 0;
1383  Orders.push_back(MDT.getBase().getNode(Root));
1384  do {
1385  MachineDomTreeNode *Node = Orders[idx++];
1386  for (MachineDomTreeNode *Child : Node->children()) {
1387  if (WorkSet.count(Child))
1388  Orders.push_back(Child);
1389  }
1390  } while (idx != Orders.size());
1391  assert(Orders.size() == WorkSet.size() &&
1392  "Orders have different size with WorkSet");
1393 
1394 #ifndef NDEBUG
1395  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1396  SmallVectorImpl<MachineDomTreeNode *>::reverse_iterator RIt = Orders.rbegin();
1397  for (; RIt != Orders.rend(); RIt++)
1398  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1399  LLVM_DEBUG(dbgs() << "\n");
1400 #endif
1401 }
1402 
1403 /// Try to hoist spills according to BB hotness. The spills to be removed will
1404 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1405 /// \p SpillsToIns.
1406 void HoistSpillHelper::runHoistSpills(
1407  LiveInterval &OrigLI, VNInfo &OrigVNI,
1408    SmallPtrSet<MachineInstr *, 16> &Spills,
1409    SmallVectorImpl<MachineInstr *> &SpillsToRm,
1410    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1411  // Visit order of dominator tree nodes.
1412  SmallVector<MachineDomTreeNode *, 32> Orders;
1413  // SpillsToKeep contains all the nodes where spills are to be inserted
1414  // during hoisting. If the spill to be inserted is an original spill
1415  // (not a hoisted one), the value of the map entry is 0. If the spill
1416  // is a hoisted spill, the value of the map entry is the VReg to be used
1417  // as the source of the spill.
1418  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1419  // Map from BB to the first spill inside of it.
1420  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1421 
1422  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1423 
1424  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1425  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1426  SpillBBToSpill);
1427 
1428  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1429  // nodes set and the cost of all the spills inside those nodes.
1430  // The nodes set are the locations where spills are to be inserted
1431  // in the subtree of current node.
1432  using NodesCostPair =
1433  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1434  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1435 
1436  // Iterate Orders set in reverse order, which will be a bottom-up order
1437  // in the dominator tree. Once we visit a dom tree node, we know its
1438  // children have already been visited and the spill locations in the
1439  // subtrees of all the children have been determined.
1440  SmallVectorImpl<MachineDomTreeNode *>::reverse_iterator RIt = Orders.rbegin();
1441  for (; RIt != Orders.rend(); RIt++) {
1442  MachineBasicBlock *Block = (*RIt)->getBlock();
1443 
1444  // If Block contains an original spill, simply continue.
1445  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1446  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1447  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1448  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1449  continue;
1450  }
1451 
1452  // Collect spills in subtree of current node (*RIt) to
1453  // SpillsInSubTreeMap[*RIt].first.
1454  for (MachineDomTreeNode *Child : (*RIt)->children()) {
1455  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1456  continue;
1457  // The statement "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1458  // must be evaluated before taking the begin and end iterators of
1459  // SpillsInSubTreeMap[Child].first; otherwise those iterators may be
1460  // invalidated when SpillsInSubTreeMap[*RIt] is created for the first time
1461  // and the map grows, moving the original buckets.
1462  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1463  SpillsInSubTreeMap[*RIt].first;
1464  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1465  SubTreeCost += SpillsInSubTreeMap[Child].second;
1466  auto BI = SpillsInSubTreeMap[Child].first.begin();
1467  auto EI = SpillsInSubTreeMap[Child].first.end();
1468  SpillsInSubTree.insert(BI, EI);
1469  SpillsInSubTreeMap.erase(Child);
1470  }
1471 
1472  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1473  SpillsInSubTreeMap[*RIt].first;
1474  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1475  // No spills in subtree, simply continue.
1476  if (SpillsInSubTree.empty())
1477  continue;
1478 
1479  // Check whether Block is a possible candidate to insert spill.
1480  Register LiveReg;
1481  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1482  continue;
1483 
1484  // If there are multiple spills that could be merged, bias a little
1485  // to hoist the spill.
1486  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1487  ? BranchProbability(9, 10)
1488  : BranchProbability(1, 1);
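    // Rough numeric illustration (made-up frequencies): with two mergeable
    // spills in blocks of frequency 60 and 50, SubTreeCost is 110, so hoisting
    // them into a common dominator of frequency 115 is still taken because
    // 110 > 115 * 9/10.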
1489  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1490  // Hoist: Move spills to current Block.
1491  for (auto *const SpillBB : SpillsInSubTree) {
1492  // When SpillBB is a BB that contains an original spill, add that spill
1493  // to SpillsToRm.
1494  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1495  !SpillsToKeep[SpillBB]) {
1496  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1497  SpillsToRm.push_back(SpillToRm);
1498  }
1499  // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1500  SpillsToKeep.erase(SpillBB);
1501  }
1502  // Current Block is the BB containing the new hoisted spill. Add it to
1503  // SpillsToKeep. LiveReg is the source of the new spill.
1504  SpillsToKeep[*RIt] = LiveReg;
1505  LLVM_DEBUG({
1506  dbgs() << "spills in BB: ";
1507  for (const auto Rspill : SpillsInSubTree)
1508  dbgs() << Rspill->getBlock()->getNumber() << " ";
1509  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1510  << "\n";
1511  });
1512  SpillsInSubTree.clear();
1513  SpillsInSubTree.insert(*RIt);
1514  SubTreeCost = MBFI.getBlockFreq(Block);
1515  }
1516  }
1517  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1518  // save them to SpillsToIns.
1519  for (const auto &Ent : SpillsToKeep) {
1520  if (Ent.second)
1521  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1522  }
1523 }
1524 
1525 /// For spills with equal values, remove redundant spills and hoist those left
1526 /// to less hot spots.
1527 ///
1528 /// Spills with equal values will be collected into the same set in
1529 /// MergeableSpills when a spill is inserted. These equal spills originate
1530 /// from the same defining instruction and are dominated by that instruction.
1531 /// Before hoisting all the equal spills, redundant spills inside the same
1532 /// BB are first marked to be deleted. Then, starting from the spills left,
1533 /// walk up the dominator tree towards the Root node where the defining
1534 /// instruction is located, mark the dominated spills to be deleted along the
1535 /// way, and collect the BB nodes on the path from non-dominated spills to the
1536 /// defining instruction into a WorkSet. The nodes in WorkSet are the candidate
1537 /// places where we consider hoisting the spills. We iterate the WorkSet in
1538 /// bottom-up order, and for each node we decide whether to hoist the spills
1539 /// inside its subtree to that node. In this way, we can get benefit locally
1540 /// even if hoisting all the equal spills to one cold place is impossible.
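///
/// As a small, hypothetical example: if BB1 dominates BB2 and BB3, and BB2 and
/// BB3 each contain a spill of the same value inside a loop, the two spills
/// can be replaced by a single spill in BB1, provided a live sibling register
/// is available there and BB1 is colder than BB2 and BB3 combined.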
1541 void HoistSpillHelper::hoistAllSpills() {
1542  SmallVector<Register, 4> NewVRegs;
1543  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1544 
1545  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1546  Register Reg = Register::index2VirtReg(i);
1547  Register Original = VRM.getPreSplitReg(Reg);
1548  if (!MRI.def_empty(Reg))
1549  Virt2SiblingsMap[Original].insert(Reg);
1550  }
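 // For example (virtual register numbers are purely illustrative): if %5
 // and %7 were both created by splitting %2, VRM.getPreSplitReg maps each
 // of them back to %2, so both are recorded in Virt2SiblingsMap[%2].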
1551 
1552  // Each entry in MergeableSpills contains a spill set with equal values.
1553  for (auto &Ent : MergeableSpills) {
1554  int Slot = Ent.first.first;
1555  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1556  VNInfo *OrigVNI = Ent.first.second;
1557  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1558  if (Ent.second.empty())
1559  continue;
1560 
1561  LLVM_DEBUG({
1562  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1563  << "Equal spills in BB: ";
1564  for (const auto spill : EqValSpills)
1565  dbgs() << spill->getParent()->getNumber() << " ";
1566  dbgs() << "\n";
1567  });
1568 
1569  // SpillsToRm is the spill set to be removed from EqValSpills.
1570  SmallVector<MachineInstr *, 16> SpillsToRm;
1571  // SpillsToIns is the spill set to be newly inserted after hoisting.
1572  DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1573 
1574  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1575 
1576  LLVM_DEBUG({
1577  dbgs() << "Finally inserted spills in BB: ";
1578  for (const auto &Ispill : SpillsToIns)
1579  dbgs() << Ispill.first->getNumber() << " ";
1580  dbgs() << "\nFinally removed spills in BB: ";
1581  for (const auto Rspill : SpillsToRm)
1582  dbgs() << Rspill->getParent()->getNumber() << " ";
1583  dbgs() << "\n";
1584  });
1585 
1586  // Stack live range update.
1587  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1588  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1589  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1590  StackIntvl.getValNumInfo(0));
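 // MergeValueInAsValue copies the segments of OrigVNI from OrigLI into the
 // stack slot's interval under its value number 0, keeping the slot's
 // liveness conservative now that spills are being added and removed.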
1591 
1592  // Insert hoisted spills.
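 // For each target block, InsertPointAnalysis picks the last safe insertion
 // point, the target's storeRegToStackSlot hook emits the actual store, and
 // the new instructions are registered with LiveIntervals; getVDefInterval
 // then computes intervals for any virtual registers the store defines.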
1593  for (auto const &Insert : SpillsToIns) {
1594  MachineBasicBlock *BB = Insert.first;
1595  Register LiveReg = Insert.second;
1596  MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1597  MachineInstrSpan MIS(MII, BB);
1598  TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1599  MRI.getRegClass(LiveReg), &TRI);
1600  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1601  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1602  getVDefInterval(MI, LIS);
1603  ++NumSpills;
1604  }
1605 
1606  // Remove redundant spills or change them to dead instructions.
1607  NumSpills -= SpillsToRm.size();
1608  for (auto *const RMEnt : SpillsToRm) {
1609  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1610  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1611  MachineOperand &MO = RMEnt->getOperand(i - 1);
1612  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1613  RMEnt->removeOperand(i - 1);
1614  }
1615  }
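 // Each redundant spill has been rewritten to a KILL with its non-dead
 // implicit defs stripped, so eliminateDeadDefs below can erase it as a
 // dead instruction.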
1616  Edit.eliminateDeadDefs(SpillsToRm, std::nullopt);
1617  }
1618 }
1619 
1620 /// When a virtual register is cloned, the \p New register should be given
1621 /// the same physreg or stack slot as the \p Old register.
1622 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1623  if (VRM.hasPhys(Old))
1624  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1625  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1626  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1627  else
1628  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1629  if (VRM.hasShape(Old))
1630  VRM.assignVirt2Shape(New, VRM.getShape(Old));
1631 }