1 //===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The inline spiller modifies the machine function directly instead of
10 // inserting spills and restores in VirtRegMap.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SplitKit.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/CodeGen/LiveInterval.h"
26 #include "llvm/CodeGen/LiveIntervals.h"
27 #include "llvm/CodeGen/LiveRangeEdit.h"
28 #include "llvm/CodeGen/LiveStacks.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
31 #include "llvm/CodeGen/MachineDominators.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineInstr.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineInstrBundle.h"
37 #include "llvm/CodeGen/MachineLoopInfo.h"
38 #include "llvm/CodeGen/MachineOperand.h"
39 #include "llvm/CodeGen/MachineRegisterInfo.h"
40 #include "llvm/CodeGen/SlotIndexes.h"
41 #include "llvm/CodeGen/Spiller.h"
42 #include "llvm/CodeGen/StackMaps.h"
43 #include "llvm/CodeGen/TargetInstrInfo.h"
44 #include "llvm/CodeGen/TargetOpcodes.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/CodeGen/TargetSubtargetInfo.h"
47 #include "llvm/CodeGen/VirtRegMap.h"
48 #include "llvm/Config/llvm-config.h"
49 #include "llvm/Support/BlockFrequency.h"
50 #include "llvm/Support/BranchProbability.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <cassert>
57 #include <iterator>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 #define DEBUG_TYPE "regalloc"
65 
66 STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
67 STATISTIC(NumSnippets, "Number of spilled snippets");
68 STATISTIC(NumSpills, "Number of spills inserted");
69 STATISTIC(NumSpillsRemoved, "Number of spills removed");
70 STATISTIC(NumReloads, "Number of reloads inserted");
71 STATISTIC(NumReloadsRemoved, "Number of reloads removed");
72 STATISTIC(NumFolded, "Number of folded stack accesses");
73 STATISTIC(NumFoldedLoads, "Number of folded loads");
74 STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
75 
76 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
77  cl::desc("Disable inline spill hoisting"));
78 static cl::opt<bool>
79 RestrictStatepointRemat("restrict-statepoint-remat",
80  cl::init(false), cl::Hidden,
81  cl::desc("Restrict remat for statepoint operands"));
82 
83 namespace {
84 
85 class HoistSpillHelper : private LiveRangeEdit::Delegate {
86  MachineFunction &MF;
87  LiveIntervals &LIS;
88  LiveStacks &LSS;
89  AliasAnalysis *AA;
90  MachineDominatorTree &MDT;
91  MachineLoopInfo &Loops;
92  VirtRegMap &VRM;
93  MachineRegisterInfo &MRI;
94  const TargetInstrInfo &TII;
95  const TargetRegisterInfo &TRI;
96  const MachineBlockFrequencyInfo &MBFI;
97 
98  InsertPointAnalysis IPA;
99 
100  // Map from StackSlot to the LiveInterval of the original register.
101  // Note the LiveInterval of the original register may have been deleted
102  // after it is spilled. We keep a copy here to track the range where
103  // spills can be moved.
104  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
105 
106  // Map from pair of (StackSlot and Original VNI) to a set of spills which
107  // have the same stackslot and have equal values defined by Original VNI.
108  // These spills are mergeable and are hoist candidates.
109  using MergeableSpillsMap =
110      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
111  MergeableSpillsMap MergeableSpills;
112 
113  /// This is the map from original register to a set containing all its
114  /// siblings. To hoist a spill to another BB, we need to find out a live
115  /// sibling there and use it as the source of the new spill.
116  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
117 
118  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
119  MachineBasicBlock &BB, Register &LiveReg);
120 
121  void rmRedundantSpills(
122      SmallPtrSet<MachineInstr *, 16> &Spills,
123      SmallVectorImpl<MachineInstr *> &SpillsToRm,
124      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
125 
126  void getVisitOrders(
127      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
128      SmallVectorImpl<MachineDomTreeNode *> &Orders,
129      SmallVectorImpl<MachineInstr *> &SpillsToRm,
130      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
131      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
132 
133  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
134                      SmallPtrSet<MachineInstr *, 16> &Spills,
135                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
136                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
137 
138 public:
139  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
140  VirtRegMap &vrm)
141  : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
142  LSS(pass.getAnalysis<LiveStacks>()),
143  AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
144  MDT(pass.getAnalysis<MachineDominatorTree>()),
145  Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
146  MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
147  TRI(*mf.getSubtarget().getRegisterInfo()),
148  MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
149  IPA(LIS, mf.getNumBlockIDs()) {}
150 
151  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
152  unsigned Original);
153  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
154  void hoistAllSpills();
155  void LRE_DidCloneVirtReg(Register, Register) override;
156 };
157 
158 class InlineSpiller : public Spiller {
159  MachineFunction &MF;
160  LiveIntervals &LIS;
161  LiveStacks &LSS;
162  AliasAnalysis *AA;
163  MachineDominatorTree &MDT;
164  MachineLoopInfo &Loops;
165  VirtRegMap &VRM;
166  MachineRegisterInfo &MRI;
167  const TargetInstrInfo &TII;
168  const TargetRegisterInfo &TRI;
169  const MachineBlockFrequencyInfo &MBFI;
170 
171  // Variables that are valid during spill(), but used by multiple methods.
172  LiveRangeEdit *Edit;
173  LiveInterval *StackInt;
174  int StackSlot;
175  Register Original;
176 
177  // All registers to spill to StackSlot, including the main register.
178  SmallVector<Register, 8> RegsToSpill;
179 
180  // All COPY instructions to/from snippets.
181  // They are ignored since both operands refer to the same stack slot.
182  SmallPtrSet<MachineInstr*, 8> SnippetCopies;
183 
184  // Values that failed to remat at some point.
185  SmallPtrSet<VNInfo*, 8> UsedValues;
186 
187  // Dead defs generated during spilling.
188  SmallVector<MachineInstr*, 8> DeadDefs;
189 
190  // Object that records spill information and does the hoisting.
191  HoistSpillHelper HSpiller;
192 
193  // Live range weight calculator.
194  VirtRegAuxInfo &VRAI;
195 
196  ~InlineSpiller() override = default;
197 
198 public:
199  InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
200  VirtRegAuxInfo &VRAI)
201  : MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
202  LSS(Pass.getAnalysis<LiveStacks>()),
203  AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
204  MDT(Pass.getAnalysis<MachineDominatorTree>()),
205  Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
206  MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
207  TRI(*MF.getSubtarget().getRegisterInfo()),
208  MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
209  HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
210 
211  void spill(LiveRangeEdit &) override;
212  void postOptimization() override;
213 
214 private:
215  bool isSnippet(const LiveInterval &SnipLI);
216  void collectRegsToSpill();
217 
218  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
219 
220  bool isSibling(Register Reg);
221  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
222  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
223 
224  void markValueUsed(LiveInterval*, VNInfo*);
225  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
226  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
227  void reMaterializeAll();
228 
229  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
230  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
231  MachineInstr *LoadMI = nullptr);
232  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
233  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
234 
235  void spillAroundUses(Register Reg);
236  void spillAll();
237 };
238 
239 } // end anonymous namespace
240 
241 Spiller::~Spiller() = default;
242 
243 void Spiller::anchor() {}
244 
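// Factory hook, declared in Spiller.h, that the register allocators use to
// create the inline spiller implemented in this file.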
245 Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
246                                    MachineFunction &MF, VirtRegMap &VRM,
247  VirtRegAuxInfo &VRAI) {
248  return new InlineSpiller(Pass, MF, VRM, VRAI);
249 }
250 
251 //===----------------------------------------------------------------------===//
252 // Snippets
253 //===----------------------------------------------------------------------===//
254 
255 // When spilling a virtual register, we also spill any snippets it is connected
256 // to. The snippets are small live ranges that only have a single real use,
257 // leftovers from live range splitting. Spilling them enables memory operand
258 // folding or tightens the live range around the single use.
259 //
260 // This minimizes register pressure and maximizes the store-to-load distance for
261 // spill slots which can be important in tight loops.
262 
263 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
264 /// otherwise return 0.
265 static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
266  if (!MI.isFullCopy())
267  return Register();
268  if (MI.getOperand(0).getReg() == Reg)
269  return MI.getOperand(1).getReg();
270  if (MI.getOperand(1).getReg() == Reg)
271  return MI.getOperand(0).getReg();
272  return Register();
273 }
274 
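// Make sure LiveIntervals has a live interval for every virtual register
// defined by MI, computing one on demand if it does not exist yet. This is
// called for newly inserted spill instructions so later queries see their
// defs.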
275 static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
276  for (const MachineOperand &MO : MI.operands())
277  if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
278  LIS.getInterval(MO.getReg());
279 }
280 
281 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
282 /// It is assumed that SnipLI is a virtual register with the same original as
283 /// Edit->getReg().
284 bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
285  Register Reg = Edit->getReg();
286 
287  // A snippet is a tiny live range with only a single instruction using it
288  // besides copies to/from Reg or spills/fills. We accept:
289  //
290  // %snip = COPY %Reg / FILL fi#
291  // %snip = USE %snip
292  // %Reg = COPY %snip / SPILL %snip, fi#
293  //
294  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
295  return false;
296 
297  MachineInstr *UseMI = nullptr;
298 
299  // Check that all uses satisfy our criteria.
300  for (MachineRegisterInfo::reg_instr_nodbg_iterator
301           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
302           E = MRI.reg_instr_nodbg_end();
303       RI != E;) {
304  MachineInstr &MI = *RI++;
305 
306  // Allow copies to/from Reg.
307  if (isFullCopyOf(MI, Reg))
308  continue;
309 
310  // Allow stack slot loads.
311  int FI;
312  if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
313  continue;
314 
315  // Allow stack slot stores.
316  if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
317  continue;
318 
319  // Allow a single additional instruction.
320  if (UseMI && &MI != UseMI)
321  return false;
322  UseMI = &MI;
323  }
324  return true;
325 }
326 
327 /// collectRegsToSpill - Collect live range snippets that only have a single
328 /// real use.
329 void InlineSpiller::collectRegsToSpill() {
330  Register Reg = Edit->getReg();
331 
332  // Main register always spills.
333  RegsToSpill.assign(1, Reg);
334  SnippetCopies.clear();
335 
336  // Snippets all have the same original, so there can't be any for an original
337  // register.
338  if (Original == Reg)
339  return;
340 
341  for (MachineInstr &MI :
342       llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
343  Register SnipReg = isFullCopyOf(MI, Reg);
344  if (!isSibling(SnipReg))
345  continue;
346  LiveInterval &SnipLI = LIS.getInterval(SnipReg);
347  if (!isSnippet(SnipLI))
348  continue;
349  SnippetCopies.insert(&MI);
350  if (isRegToSpill(SnipReg))
351  continue;
352  RegsToSpill.push_back(SnipReg);
353  LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
354  ++NumSnippets;
355  }
356 }
357 
358 bool InlineSpiller::isSibling(Register Reg) {
359  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
360 }
361 
362 /// It is beneficial to spill to an earlier place in the same BB in the
363 /// following case:
364 /// There is an alternative def earlier in the same MBB.
365 /// Hoist the spill as far as possible in SpillMBB. This can ease
366 /// register pressure:
367 ///
368 /// x = def
369 /// y = use x
370 /// s = copy x
371 ///
372 /// Hoisting the spill of s to immediately after the def removes the
373 /// interference between x and y:
374 ///
375 /// x = def
376 /// spill x
377 /// y = use killed x
378 ///
379 /// This hoist only helps when the copy kills its source.
380 ///
381 bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
382  MachineInstr &CopyMI) {
383  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
384 #ifndef NDEBUG
385  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
386  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
387 #endif
388 
389  Register SrcReg = CopyMI.getOperand(1).getReg();
390  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
391  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
392  LiveQueryResult SrcQ = SrcLI.Query(Idx);
393  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
394  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
395  return false;
396 
397  // Conservatively extend the stack slot range to the range of the original
398  // value. We may be able to do better with stack slot coloring by being more
399  // careful here.
400  assert(StackInt && "No stack slot assigned yet.");
401  LiveInterval &OrigLI = LIS.getInterval(Original);
402  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
403  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
404  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
405  << *StackInt << '\n');
406 
407  // We are going to spill SrcVNI immediately after its def, so clear out
408  // any later spills of the same value.
409  eliminateRedundantSpills(SrcLI, SrcVNI);
410 
411  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
412  MachineBasicBlock::iterator MII;
413  if (SrcVNI->isPHIDef())
414  MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
415  else {
416  MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
417  assert(DefMI && "Defining instruction disappeared");
418  MII = DefMI;
419  ++MII;
420  }
421  MachineInstrSpan MIS(MII, MBB);
422  // Insert spill without kill flag immediately after def.
423  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
424  MRI.getRegClass(SrcReg), &TRI);
425  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
426  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
427  getVDefInterval(MI, LIS);
428  --MII; // Point to store instruction.
429  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
430 
431  // If only one store instruction is required for the spill, add it to the
432  // mergeable list. On X86 AMX, two instructions are required to store, so
433  // we disable merging for that case.
434  if (MIS.begin() == MII)
435  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
436  ++NumSpills;
437  return true;
438 }
439 
440 /// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
441 /// redundant spills of this value in SLI.reg and sibling copies.
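/// For example, if the value is already on the stack right after its def:
///
///    %x = def
///    SPILL %x, fi#           ; value now lives in the stack slot
///    ...
///    %y = COPY %x            ; sibling copy
///    SPILL %y, fi#           ; redundant, same value, same slot
///
/// the second spill is rewritten to a KILL and queued as a dead def so it
/// can be deleted later.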
442 void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
443  assert(VNI && "Missing value");
444  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
445  WorkList.push_back(std::make_pair(&SLI, VNI));
446  assert(StackInt && "No stack slot assigned yet.");
447 
448  do {
449  LiveInterval *LI;
450  std::tie(LI, VNI) = WorkList.pop_back_val();
451  Register Reg = LI->reg();
452  LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
453  << VNI->def << " in " << *LI << '\n');
454 
455  // Regs to spill are taken care of.
456  if (isRegToSpill(Reg))
457  continue;
458 
459  // Add all of VNI's live range to StackInt.
460  StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
461  LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
462 
463  // Find all spills and copies of VNI.
464  for (MachineInstr &MI :
465       llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
466  if (!MI.isCopy() && !MI.mayStore())
467  continue;
468  SlotIndex Idx = LIS.getInstructionIndex(MI);
469  if (LI->getVNInfoAt(Idx) != VNI)
470  continue;
471 
472  // Follow sibling copies down the dominator tree.
473  if (Register DstReg = isFullCopyOf(MI, Reg)) {
474  if (isSibling(DstReg)) {
475  LiveInterval &DstLI = LIS.getInterval(DstReg);
476  VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
477  assert(DstVNI && "Missing defined value");
478  assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
479  WorkList.push_back(std::make_pair(&DstLI, DstVNI));
480  }
481  continue;
482  }
483 
484  // Erase spills.
485  int FI;
486  if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
487  LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
488  // eliminateDeadDefs won't normally remove stores, so switch opcode.
489  MI.setDesc(TII.get(TargetOpcode::KILL));
490  DeadDefs.push_back(&MI);
491  ++NumSpillsRemoved;
492  if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
493  --NumSpills;
494  }
495  }
496  } while (!WorkList.empty());
497 }
498 
499 //===----------------------------------------------------------------------===//
500 // Rematerialization
501 //===----------------------------------------------------------------------===//
502 
503 /// markValueUsed - Remember that VNI failed to rematerialize, so its defining
504 /// instruction cannot be eliminated. See through snippet copies
505 void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
506  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
507  WorkList.push_back(std::make_pair(LI, VNI));
508  do {
509  std::tie(LI, VNI) = WorkList.pop_back_val();
510  if (!UsedValues.insert(VNI).second)
511  continue;
512 
513  if (VNI->isPHIDef()) {
514  MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
515  for (MachineBasicBlock *P : MBB->predecessors()) {
516  VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
517  if (PVNI)
518  WorkList.push_back(std::make_pair(LI, PVNI));
519  }
520  continue;
521  }
522 
523  // Follow snippet copies.
524  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
525  if (!SnippetCopies.count(MI))
526  continue;
527  LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
528  assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
529  VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
530  assert(SnipVNI && "Snippet undefined before copy");
531  WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
532  } while (!WorkList.empty());
533 }
534 
535 bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
536                                                      MachineInstr &MI) {
537  if (!RestrictStatepointRemat)
538    return true;
539  // Here's a quick explanation of the problem we're trying to handle here:
540  // * There are some pseudo instructions with more vreg uses than there are
541  // physical registers on the machine.
542  // * This is normally handled by spilling the vreg, and folding the reload
543  // into the user instruction. (Thus decreasing the number of used vregs
544  // until the remainder can be assigned to physregs.)
545  // * However, since we may try to spill vregs in any order, we can end up
546  // trying to spill each operand to the instruction, and then rematting it
547  // instead. When that happens, the new live intervals (for the remats) are
548  // expected to be trivially assignable (i.e. RS_Done). However, since we
549  // may have more remats than physregs, we're guaranteed to fail to assign
550  // one.
551  // At the moment, we only handle this for STATEPOINTs since they're the only
552  // pseudo op where we've seen this. If we start seeing other instructions
553  // with the same problem, we need to revisit this.
554  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
555  return true;
556  // For STATEPOINTs we allow re-materialization for fixed arguments only hoping
557  // that number of physical registers is enough to cover all fixed arguments.
558  // If it is not true we need to revisit it.
559  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
560  EndIdx = MI.getNumOperands();
561  Idx < EndIdx; ++Idx) {
562  MachineOperand &MO = MI.getOperand(Idx);
563  if (MO.isReg() && MO.getReg() == VReg)
564  return false;
565  }
566  return true;
567 }
568 
569 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
570 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
571  // Analyze instruction.
572  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
573  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
574 
575  if (!RI.Reads)
576  return false;
577 
578  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
579  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
580 
581  if (!ParentVNI) {
582  LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
583  for (MachineOperand &MO : MI.operands())
584  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
585  MO.setIsUndef();
586  LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
587  return true;
588  }
589 
590  if (SnippetCopies.count(&MI))
591  return false;
592 
593  LiveInterval &OrigLI = LIS.getInterval(Original);
594  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
595  LiveRangeEdit::Remat RM(ParentVNI);
596  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
597 
598  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
599  markValueUsed(&VirtReg, ParentVNI);
600  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
601  return false;
602  }
603 
604  // If the instruction also writes VirtReg.reg, it had better not require the
605  // same register for uses and defs.
606  if (RI.Tied) {
607  markValueUsed(&VirtReg, ParentVNI);
608  LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
609  return false;
610  }
611 
612  // Before rematerializing into a register for a single instruction, try to
613  // fold a load into the instruction. That avoids allocating a new register.
614  if (RM.OrigMI->canFoldAsLoad() &&
615  foldMemoryOperand(Ops, RM.OrigMI)) {
616  Edit->markRematerialized(RM.ParentVNI);
617  ++NumFoldedLoads;
618  return true;
619  }
620 
621  // If we can't guarantee that we'll be able to actually assign the new vreg,
622  // we can't remat.
623  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
624  markValueUsed(&VirtReg, ParentVNI);
625  LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
626  return false;
627  }
628 
629  // Allocate a new register for the remat.
630  Register NewVReg = Edit->createFrom(Original);
631 
632  // Finally we can rematerialize OrigMI before MI.
633  SlotIndex DefIdx =
634  Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
635 
636  // We take the DebugLoc from MI, since OrigMI may be attributed to a
637  // different source location.
638  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
639  NewMI->setDebugLoc(MI.getDebugLoc());
640 
641  (void)DefIdx;
642  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
643  << *LIS.getInstructionFromIndex(DefIdx));
644 
645  // Replace operands
646  for (const auto &OpPair : Ops) {
647  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
648  if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
649  MO.setReg(NewVReg);
650  MO.setIsKill();
651  }
652  }
653  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
654 
655  ++NumRemats;
656  return true;
657 }
658 
659 /// reMaterializeAll - Try to rematerialize as many uses as possible,
660 /// and trim the live ranges after.
661 void InlineSpiller::reMaterializeAll() {
662  if (!Edit->anyRematerializable(AA))
663  return;
664 
665  UsedValues.clear();
666 
667  // Try to remat before all uses of snippets.
668  bool anyRemat = false;
669  for (Register Reg : RegsToSpill) {
670  LiveInterval &LI = LIS.getInterval(Reg);
671  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
672  // Debug values are not allowed to affect codegen.
673  if (MI.isDebugValue())
674  continue;
675 
676  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
677  "instruction that isn't a DBG_VALUE");
678 
679  anyRemat |= reMaterializeFor(LI, MI);
680  }
681  }
682  if (!anyRemat)
683  return;
684 
685  // Remove any values that were completely rematted.
686  for (Register Reg : RegsToSpill) {
687  LiveInterval &LI = LIS.getInterval(Reg);
688  for (VNInfo *VNI : LI.vnis()) {
689  if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
690  continue;
691  MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
692  MI->addRegisterDead(Reg, &TRI);
693  if (!MI->allDefsAreDead())
694  continue;
695  LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
696  DeadDefs.push_back(MI);
697  }
698  }
699 
700  // Eliminate dead code after remat. Note that some snippet copies may be
701  // deleted here.
702  if (DeadDefs.empty())
703  return;
704  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
705  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
706 
707  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
708  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
709  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
710  // removed, PHI VNIs are still left in the LiveInterval.
711  // So to get rid of an unused reg, we need to check whether it has a non-dbg
712  // reference instead of whether it has a non-empty interval.
713  unsigned ResultPos = 0;
714  for (Register Reg : RegsToSpill) {
715  if (MRI.reg_nodbg_empty(Reg)) {
716  Edit->eraseVirtReg(Reg);
717  continue;
718  }
719 
720  assert(LIS.hasInterval(Reg) &&
721  (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
722  "Empty and not used live-range?!");
723 
724  RegsToSpill[ResultPos++] = Reg;
725  }
726  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
727  LLVM_DEBUG(dbgs() << RegsToSpill.size()
728  << " registers to spill after remat.\n");
729 }
730 
731 //===----------------------------------------------------------------------===//
732 // Spilling
733 //===----------------------------------------------------------------------===//
734 
735 /// If MI is a load or store of StackSlot, it can be removed.
736 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
737  int FI = 0;
738  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
739  bool IsLoad = InstrReg;
740  if (!IsLoad)
741  InstrReg = TII.isStoreToStackSlot(*MI, FI);
742 
743  // We have a stack access. Is it the right register and slot?
744  if (InstrReg != Reg || FI != StackSlot)
745  return false;
746 
747  if (!IsLoad)
748  HSpiller.rmFromMergeableSpills(*MI, StackSlot);
749 
750  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
751  LIS.RemoveMachineInstrFromMaps(*MI);
752  MI->eraseFromParent();
753 
754  if (IsLoad) {
755  ++NumReloadsRemoved;
756  --NumReloads;
757  } else {
758  ++NumSpillsRemoved;
759  --NumSpills;
760  }
761 
762  return true;
763 }
764 
765 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
767 // Dump the range of instructions from B to E with their slot indexes.
768 static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
769                                                MachineBasicBlock::iterator E,
770                                                LiveIntervals const &LIS,
771  const char *const header,
772  Register VReg = Register()) {
773  char NextLine = '\n';
774  char SlotIndent = '\t';
775 
776  if (std::next(B) == E) {
777  NextLine = ' ';
778  SlotIndent = ' ';
779  }
780 
781  dbgs() << '\t' << header << ": " << NextLine;
782 
783  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
784  SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
785 
786  // If a register was passed in and this instruction has it as a
787  // destination that is marked as an early clobber, print the
788  // early-clobber slot index.
789  if (VReg) {
790  MachineOperand *MO = I->findRegisterDefOperand(VReg);
791  if (MO && MO->isEarlyClobber())
792  Idx = Idx.getRegSlot(true);
793  }
794 
795  dbgs() << SlotIndent << Idx << '\t' << *I;
796  }
797 }
798 #endif
799 
800 /// foldMemoryOperand - Try folding stack slot references in Ops into their
801 /// instructions.
802 ///
803 /// @param Ops Operand indices from AnalyzeVirtRegInBundle().
804 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
805 /// @return True on success.
806 bool InlineSpiller::
807 foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
808  MachineInstr *LoadMI) {
809  if (Ops.empty())
810  return false;
811  // Don't attempt folding in bundles.
812  MachineInstr *MI = Ops.front().first;
813  if (Ops.back().first != MI || MI->isBundled())
814  return false;
815 
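  // Remember whether MI is a full register COPY: folding a copy produces a
  // spill or a reload rather than a generic folded stack access, which the
  // statistics below account for. ImpReg records the spilled register when it
  // occurs as an implicit operand, so leftover implicit operands referring to
  // it can be stripped from the folded instruction afterwards.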
816  bool WasCopy = MI->isCopy();
817  Register ImpReg;
818 
819  // TII::foldMemoryOperand will do what we need here for statepoint
820  // (fold load into use and remove corresponding def). We will replace
821  // uses of removed def with loads (spillAroundUses).
822  // For that to work we need to untie def and use to pass it through
823  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
824  // fold them.
825  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
826 
827  // Spill subregs if the target allows it.
828  // We always want to spill subregs for stackmap/patchpoint pseudos.
829  bool SpillSubRegs = TII.isSubregFoldable() ||
830  MI->getOpcode() == TargetOpcode::STATEPOINT ||
831  MI->getOpcode() == TargetOpcode::PATCHPOINT ||
832  MI->getOpcode() == TargetOpcode::STACKMAP;
833 
834  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
835  // operands.
836  SmallVector<unsigned, 8> FoldOps;
837  for (const auto &OpPair : Ops) {
838  unsigned Idx = OpPair.second;
839  assert(MI == OpPair.first && "Instruction conflict during operand folding");
840  MachineOperand &MO = MI->getOperand(Idx);
841  if (MO.isImplicit()) {
842  ImpReg = MO.getReg();
843  continue;
844  }
845 
846  if (!SpillSubRegs && MO.getSubReg())
847  return false;
848  // We cannot fold a load instruction into a def.
849  if (LoadMI && MO.isDef())
850  return false;
851  // Tied use operands should not be passed to foldMemoryOperand.
852  if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
853  FoldOps.push_back(Idx);
854  }
855 
856  // If we only have implicit uses, we won't be able to fold that.
857  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
858  if (FoldOps.empty())
859  return false;
860 
861  MachineInstrSpan MIS(MI, MI->getParent());
862 
863  SmallVector<std::pair<unsigned, unsigned>> TiedOps;
864  if (UntieRegs)
865  for (unsigned Idx : FoldOps) {
866  MachineOperand &MO = MI->getOperand(Idx);
867  if (!MO.isTied())
868  continue;
869  unsigned Tied = MI->findTiedOperandIdx(Idx);
870  if (MO.isUse())
871  TiedOps.emplace_back(Tied, Idx);
872  else {
873  assert(MO.isDef() && "Tied to not use and def?");
874  TiedOps.emplace_back(Idx, Tied);
875  }
876  MI->untieRegOperand(Idx);
877  }
878 
879  MachineInstr *FoldMI =
880  LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
881  : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
882  if (!FoldMI) {
883  // Re-tie operands.
884  for (auto Tied : TiedOps)
885  MI->tieOperands(Tied.first, Tied.second);
886  return false;
887  }
888 
889  // Remove LIS for any dead defs in the original MI not in FoldMI.
890  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
891  if (!MO->isReg())
892  continue;
893    Register Reg = MO->getReg();
894    if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
895      continue;
896    }
897  // Skip non-Defs, including undef uses and internal reads.
898  if (MO->isUse())
899  continue;
900  PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
901  if (RI.FullyDefined)
902  continue;
903  // FoldMI does not define this physreg. Remove the LI segment.
904  assert(MO->isDead() && "Cannot fold physreg def");
905  SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
906  LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
907  }
908 
909  int FI;
910  if (TII.isStoreToStackSlot(*MI, FI) &&
911  HSpiller.rmFromMergeableSpills(*MI, FI))
912  --NumSpills;
913  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
914  // Update the call site info.
915  if (MI->isCandidateForCallSiteEntry())
916  MI->getMF()->moveCallSiteInfo(MI, FoldMI);
917 
918  // If we've folded a store into an instruction labelled with debug-info,
919  // record a substitution from the old operand to the memory operand. Handle
920  // the simple common case where operand 0 is the one being folded, plus when
921  // the destination operand is also a tied def. More values could be
922  // substituted / preserved with more analysis.
923  if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
924  // Helper lambda.
925  auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
926  // Substitute old operand zero to the new instructions memory operand.
927  unsigned OldOperandNum = Ops[0].second;
928  unsigned NewNum = FoldMI->getDebugInstrNum();
929  unsigned OldNum = MI->getDebugInstrNum();
930      MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
931                                    {NewNum, MachineFunction::DebugOperandMemNumber});
932    };
933 
934  const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
935  if (Ops.size() == 1 && Op0.isDef()) {
936  MakeSubstitution();
937  } else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
938  Op0.getReg() == MI->getOperand(1).getReg()) {
939  MakeSubstitution();
940  }
941  } else if (MI->peekDebugInstrNum()) {
942  // This is a debug-labelled instruction, but the operand being folded isn't
943  // at operand zero. Most likely this means it's a load being folded in.
944  // Substitute any register defs from operand zero up to the one being
945  // folded -- past that point, we don't know what the new operand indexes
946  // will be.
947  MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
948  }
949 
950  MI->eraseFromParent();
951 
952  // Insert any new instructions other than FoldMI into the LIS maps.
953  assert(!MIS.empty() && "Unexpected empty span of instructions!");
954  for (MachineInstr &MI : MIS)
955    if (&MI != FoldMI)
956      LIS.InsertMachineInstrInMaps(MI);
957 
958  // TII.foldMemoryOperand may have left some implicit operands on the
959  // instruction. Strip them.
960  if (ImpReg)
961  for (unsigned i = FoldMI->getNumOperands(); i; --i) {
962  MachineOperand &MO = FoldMI->getOperand(i - 1);
963  if (!MO.isReg() || !MO.isImplicit())
964  break;
965  if (MO.getReg() == ImpReg)
966  FoldMI->removeOperand(i - 1);
967  }
968 
969  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
970  "folded"));
971 
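  // Statistics: folding a non-copy counts as a folded stack access. A folded
  // COPY becomes a spill when the copy's def (operand 0) was folded, and a
  // reload otherwise.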
972  if (!WasCopy)
973  ++NumFolded;
974  else if (Ops.front().second == 0) {
975  ++NumSpills;
976    // If only one store instruction is required for the spill, add it to the
977    // mergeable list. On X86 AMX, two instructions are required to store, so
978    // we disable merging for that case.
979  if (std::distance(MIS.begin(), MIS.end()) <= 1)
980  HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
981  } else
982  ++NumReloads;
983  return true;
984 }
985 
986 void InlineSpiller::insertReload(Register NewVReg,
987                                  SlotIndex Idx,
988                                  MachineBasicBlock::iterator MI) {
989  MachineBasicBlock &MBB = *MI->getParent();
990 
991  MachineInstrSpan MIS(MI, &MBB);
992  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
993  MRI.getRegClass(NewVReg), &TRI);
994 
995  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
996 
997  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
998  NewVReg));
999  ++NumReloads;
1000 }
1001 
1002 /// Check if \p Def fully defines a VReg with an undefined value.
1003 /// If that's the case, that means the value of VReg is actually
1004 /// not relevant.
1005 static bool isRealSpill(const MachineInstr &Def) {
1006  if (!Def.isImplicitDef())
1007  return true;
1008  assert(Def.getNumOperands() == 1 &&
1009  "Implicit def with more than one definition");
1010  // We can say that the VReg defined by Def is undef, only if it is
1011  // fully defined by Def. Otherwise, some of the lanes may not be
1012  // undef and the value of the VReg matters.
1013  return Def.getOperand(0).getSubReg();
1014 }
1015 
1016 /// insertSpill - Insert a spill of NewVReg after MI.
1017 void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
1018                                 MachineBasicBlock::iterator MI) {
1019  // Spills are not terminators, so inserting spills after terminators will
1020  // violate invariants in MachineVerifier.
1021  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
1022  MachineBasicBlock &MBB = *MI->getParent();
1023 
1024  MachineInstrSpan MIS(MI, &MBB);
1025  MachineBasicBlock::iterator SpillBefore = std::next(MI);
1026  bool IsRealSpill = isRealSpill(*MI);
1027 
1028  if (IsRealSpill)
1029  TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
1030  MRI.getRegClass(NewVReg), &TRI);
1031  else
1032  // Don't spill undef value.
1033  // Anything works for undef, in particular keeping the memory
1034  // uninitialized is a viable option and it saves code size and
1035  // run time.
1036  BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
1037  .addReg(NewVReg, getKillRegState(isKill));
1038 
1039  MachineBasicBlock::iterator Spill = std::next(MI);
1040  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
1041  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
1042  getVDefInterval(MI, LIS);
1043 
1044  LLVM_DEBUG(
1045  dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
1046  ++NumSpills;
1047  // If only one store instruction is required for the spill, add it to the
1048  // mergeable list. On X86 AMX, two instructions are required to store, so
1049  // we disable merging for that case.
1050  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
1051  HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
1052 }
1053 
1054 /// spillAroundUses - insert spill code around each use of Reg.
1055 void InlineSpiller::spillAroundUses(Register Reg) {
1056  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
1057  LiveInterval &OldLI = LIS.getInterval(Reg);
1058 
1059  // Iterate over instructions using Reg.
1060  for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
1061  // Debug values are not allowed to affect codegen.
1062  if (MI.isDebugValue()) {
1063  // Modify DBG_VALUE now that the value is in a spill slot.
1064  MachineBasicBlock *MBB = MI.getParent();
1065  LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
1066  buildDbgValueForSpill(*MBB, &MI, MI, StackSlot, Reg);
1067  MBB->erase(MI);
1068  continue;
1069  }
1070 
1071  assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
1072  "instruction that isn't a DBG_VALUE");
1073 
1074  // Ignore copies to/from snippets. We'll delete them.
1075  if (SnippetCopies.count(&MI))
1076  continue;
1077 
1078  // Stack slot accesses may coalesce away.
1079  if (coalesceStackAccess(&MI, Reg))
1080  continue;
1081 
1082    // Analyze instruction.
1083    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
1084    VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
1085 
1086  // Find the slot index where this instruction reads and writes OldLI.
1087    // This is usually the def slot, except for tied early clobbers.
1088    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
1089    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
1090  if (SlotIndex::isSameInstr(Idx, VNI->def))
1091  Idx = VNI->def;
1092 
1093  // Check for a sibling copy.
1094  Register SibReg = isFullCopyOf(MI, Reg);
1095  if (SibReg && isSibling(SibReg)) {
1096  // This may actually be a copy between snippets.
1097  if (isRegToSpill(SibReg)) {
1098  LLVM_DEBUG(dbgs() << "Found new snippet copy: " << MI);
1099  SnippetCopies.insert(&MI);
1100  continue;
1101  }
1102  if (RI.Writes) {
1103  if (hoistSpillInsideBB(OldLI, MI)) {
1104  // This COPY is now dead, the value is already in the stack slot.
1105  MI.getOperand(0).setIsDead();
1106  DeadDefs.push_back(&MI);
1107  continue;
1108  }
1109  } else {
1110  // This is a reload for a sib-reg copy. Drop spills downstream.
1111  LiveInterval &SibLI = LIS.getInterval(SibReg);
1112  eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
1113  // The COPY will fold to a reload below.
1114  }
1115  }
1116 
1117  // Attempt to fold memory ops.
1118  if (foldMemoryOperand(Ops))
1119  continue;
1120 
1121  // Create a new virtual register for spill/fill.
1122  // FIXME: Infer regclass from instruction alone.
1123  Register NewVReg = Edit->createFrom(Reg);
1124 
1125  if (RI.Reads)
1126  insertReload(NewVReg, Idx, &MI);
1127 
1128  // Rewrite instruction operands.
1129  bool hasLiveDef = false;
1130  for (const auto &OpPair : Ops) {
1131  MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
1132  MO.setReg(NewVReg);
1133  if (MO.isUse()) {
1134  if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
1135  MO.setIsKill();
1136  } else {
1137  if (!MO.isDead())
1138  hasLiveDef = true;
1139  }
1140  }
1141  LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << MI << '\n');
1142 
1143  // FIXME: Use a second vreg if instruction has no tied ops.
1144  if (RI.Writes)
1145  if (hasLiveDef)
1146  insertSpill(NewVReg, true, &MI);
1147  }
1148 }
1149 
1150 /// spillAll - Spill all registers remaining after rematerialization.
1151 void InlineSpiller::spillAll() {
1152  // Update LiveStacks now that we are committed to spilling.
1153  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
1154  StackSlot = VRM.assignVirt2StackSlot(Original);
1155  StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
1156  StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
1157  } else
1158  StackInt = &LSS.getInterval(StackSlot);
1159 
1160  if (Original != Edit->getReg())
1161  VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
1162 
1163  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
1164  for (Register Reg : RegsToSpill)
1165  StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
1166  StackInt->getValNumInfo(0));
1167  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
1168 
1169  // Spill around uses of all RegsToSpill.
1170  for (Register Reg : RegsToSpill)
1171  spillAroundUses(Reg);
1172 
1173  // Hoisted spills may cause dead code.
1174  if (!DeadDefs.empty()) {
1175  LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
1176  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
1177  }
1178 
1179  // Finally delete the SnippetCopies.
1180  for (Register Reg : RegsToSpill) {
1181    for (MachineInstr &MI :
1182         llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
1183      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
1184      // FIXME: Do this with a LiveRangeEdit callback.
1185      LIS.RemoveMachineInstrFromMaps(MI);
1186      MI.eraseFromParent();
1187  }
1188  }
1189 
1190  // Delete all spilled registers.
1191  for (Register Reg : RegsToSpill)
1192  Edit->eraseVirtReg(Reg);
1193 }
1194 
1195 void InlineSpiller::spill(LiveRangeEdit &edit) {
1196  ++NumSpilledRanges;
1197  Edit = &edit;
1198  assert(!Register::isStackSlot(edit.getReg()) &&
1199  "Trying to spill a stack slot.");
1200  // Share a stack slot among all descendants of Original.
1201  Original = VRM.getOriginal(edit.getReg());
1202  StackSlot = VRM.getStackSlot(Original);
1203  StackInt = nullptr;
1204 
1205  LLVM_DEBUG(dbgs() << "Inline spilling "
1206                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
1207                    << ':' << edit.getParent() << "\nFrom original "
1208  << printReg(Original) << '\n');
1209  assert(edit.getParent().isSpillable() &&
1210  "Attempting to spill already spilled value.");
1211  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
1212 
1213  collectRegsToSpill();
1214  reMaterializeAll();
1215 
1216  // Remat may handle everything.
1217  if (!RegsToSpill.empty())
1218  spillAll();
1219 
1220  Edit->calculateRegClassAndHint(MF, VRAI);
1221 }
1222 
1223 /// Optimizations after all the reg selections and spills are done.
1224 void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }
1225 
1226 /// When a spill is inserted, add the spill to MergeableSpills map.
1227 void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
1228  unsigned Original) {
1229  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1230  LiveInterval &OrigLI = LIS.getInterval(Original);
1231  // save a copy of LiveInterval in StackSlotToOrigLI because the original
1232  // LiveInterval may be cleared after all its references are spilled.
1233  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
1234  auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
1235  LI->assign(OrigLI, Allocator);
1236  StackSlotToOrigLI[StackSlot] = std::move(LI);
1237  }
1238  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1239  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
1240  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1241  MergeableSpills[MIdx].insert(&Spill);
1242 }
1243 
1244 /// When a spill is removed, remove the spill from MergeableSpills map.
1245 /// Return true if the spill is removed successfully.
1246 bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
1247  int StackSlot) {
1248  auto It = StackSlotToOrigLI.find(StackSlot);
1249  if (It == StackSlotToOrigLI.end())
1250  return false;
1251  SlotIndex Idx = LIS.getInstructionIndex(Spill);
1252  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
1253  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
1254  return MergeableSpills[MIdx].erase(&Spill);
1255 }
1256 
1257 /// Check BB to see if it is a possible target BB to place a hoisted spill,
1258 /// i.e., there should be a living sibling of OrigReg at the insert point.
1259 bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
1260  MachineBasicBlock &BB, Register &LiveReg) {
1261  SlotIndex Idx = IPA.getLastInsertPoint(OrigLI, BB);
1262  // The original def could be after the last insert point in the root block,
1263  // in which case we can't hoist the spill to here.
1264  if (Idx < OrigVNI.def) {
1265  // TODO: We could do better here. If LI is not live in the landing pad,
1266  // we could hoist the spill after the LIP.
1267  LLVM_DEBUG(dbgs() << "can't spill in root block - def after LIP\n");
1268  return false;
1269  }
1270  Register OrigReg = OrigLI.reg();
1271  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
1272  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
1273 
1274  for (const Register &SibReg : Siblings) {
1275  LiveInterval &LI = LIS.getInterval(SibReg);
1276  VNInfo *VNI = LI.getVNInfoAt(Idx);
1277  if (VNI) {
1278  LiveReg = SibReg;
1279  return true;
1280  }
1281  }
1282  return false;
1283 }
1284 
1285 /// Remove redundant spills in the same BB. Save those redundant spills in
1286 /// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
1287 void HoistSpillHelper::rmRedundantSpills(
1288     SmallPtrSet<MachineInstr *, 16> &Spills,
1289     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1290     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1291   // For each spill seen, check SpillBBToSpill[] and see if its BB already has
1292  // another spill inside. If a BB contains more than one spill, only keep the
1293  // earlier spill with smaller SlotIndex.
1294  for (const auto CurrentSpill : Spills) {
1295  MachineBasicBlock *Block = CurrentSpill->getParent();
1296  MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
1297  MachineInstr *PrevSpill = SpillBBToSpill[Node];
1298  if (PrevSpill) {
1299  SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
1300  SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
1301  MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
1302  MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
1303  SpillsToRm.push_back(SpillToRm);
1304  SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
1305  } else {
1306  SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
1307  }
1308  }
1309  for (const auto SpillToRm : SpillsToRm)
1310  Spills.erase(SpillToRm);
1311 }
1312 
1313 /// Starting from \p Root find a top-down traversal order of the dominator
1314 /// tree to visit all basic blocks containing the elements of \p Spills.
1315 /// Redundant spills will be found and put into \p SpillsToRm at the same
1316 /// time. \p SpillBBToSpill will be populated as part of the process and
1317 /// maps a basic block to the first store occurring in the basic block.
1318 /// \post SpillsToRm.union(Spills\@post) == Spills\@pre
1319 void HoistSpillHelper::getVisitOrders(
1320     MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
1321     SmallVectorImpl<MachineDomTreeNode *> &Orders,
1322     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1323     DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
1324     DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
1325  // The set contains all the possible BB nodes to which we may hoist
1326  // original spills.
1327  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
1328  // Save the BB nodes on the path from the first BB node containing
1329  // non-redundant spill to the Root node.
1330  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
1331  // All the spills to be hoisted must originate from a single def instruction
1332  // to the OrigReg. It means the def instruction should dominate all the spills
1333  // to be hoisted. We choose the BB where the def instruction is located as
1334  // the Root.
1335  MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1336  // For every node on the dominator tree with spill, walk up on the dominator
1337  // tree towards the Root node until it is reached. If another node
1338  // containing a spill is found in the middle of the path, the spill in the
1339  // dominated block is redundant and the node containing it will be removed. All the nodes on
1340  // the path starting from the first node with non-redundant spill to the Root
1341  // node will be added to the WorkSet, which will contain all the possible
1342  // locations where spills may be hoisted to after the loop below is done.
1343  for (const auto Spill : Spills) {
1344  MachineBasicBlock *Block = Spill->getParent();
1345  MachineDomTreeNode *Node = MDT[Block];
1346  MachineInstr *SpillToRm = nullptr;
1347  while (Node != RootIDomNode) {
1348  // If Node dominates Block, and it already contains a spill, the spill in
1349  // Block will be redundant.
1350  if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1351  SpillToRm = SpillBBToSpill[MDT[Block]];
1352  break;
1353  /// If we see the Node already in WorkSet, the path from the Node to
1354  /// the Root node must already be traversed by another spill.
1355  /// Then no need to repeat.
1356  } else if (WorkSet.count(Node)) {
1357  break;
1358  } else {
1359  NodesOnPath.insert(Node);
1360  }
1361  Node = Node->getIDom();
1362  }
1363  if (SpillToRm) {
1364  SpillsToRm.push_back(SpillToRm);
1365  } else {
1366  // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1367  // set the initial status before hoisting starts. The value of BBs
1368  // containing original spills is set to 0, in order to discriminate
1369  // with BBs containing hoisted spills which will be inserted to
1370  // SpillsToKeep later during hoisting.
1371  SpillsToKeep[MDT[Block]] = 0;
1372  WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1373  }
1374  NodesOnPath.clear();
1375  }
1376 
1377  // Sort the nodes in WorkSet in top-down order and save the nodes
1378  // in Orders. Orders will be used for hoisting in runHoistSpills.
1379  unsigned idx = 0;
1380  Orders.push_back(MDT.getBase().getNode(Root));
1381  do {
1382  MachineDomTreeNode *Node = Orders[idx++];
1383  for (MachineDomTreeNode *Child : Node->children()) {
1384  if (WorkSet.count(Child))
1385  Orders.push_back(Child);
1386  }
1387  } while (idx != Orders.size());
1388  assert(Orders.size() == WorkSet.size() &&
1389  "Orders have different size with WorkSet");
1390 
1391 #ifndef NDEBUG
1392  LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1393  SmallVectorImpl<MachineDomTreeNode *>::reverse_iterator RIt = Orders.rbegin();
1394  for (; RIt != Orders.rend(); RIt++)
1395  LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1396  LLVM_DEBUG(dbgs() << "\n");
1397 #endif
1398 }
1399 
1400 /// Try to hoist spills according to BB hotness. The spills to be removed will
1401 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1402 /// \p SpillsToIns.
1403 void HoistSpillHelper::runHoistSpills(
1404  LiveInterval &OrigLI, VNInfo &OrigVNI,
1405     SmallPtrSet<MachineInstr *, 16> &Spills,
1406     SmallVectorImpl<MachineInstr *> &SpillsToRm,
1407     DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1408  // Visit order of dominator tree nodes.
1409  SmallVector<MachineDomTreeNode *, 32> Orders;
1410  // SpillsToKeep contains all the nodes where spills are to be inserted
1411  // during hoisting. If the spill to be inserted is an original spill
1412  // (not a hoisted one), the value of the map entry is 0. If the spill
1413  // is a hoisted spill, the value of the map entry is the VReg to be used
1414  // as the source of the spill.
1415  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
1416  // Map from BB to the first spill inside of it.
1417  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;
1418 
1419  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);
1420 
1421  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
1422  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
1423  SpillBBToSpill);
1424 
1425  // SpillsInSubTreeMap keeps the map from a dom tree node to a pair of
1426  // nodes set and the cost of all the spills inside those nodes.
1427  // The nodes set are the locations where spills are to be inserted
1428  // in the subtree of current node.
1429  using NodesCostPair =
1430  std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
1431  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;
1432 
1433  // Iterate Orders set in reverse order, which will be a bottom-up order
1434  // in the dominator tree. Once we visit a dom tree node, we know its
1435  // children have already been visited and the spill locations in the
1436  // subtrees of all the children have been determined.
1437  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1438  for (; RIt != Orders.rend(); RIt++) {
1439  MachineBasicBlock *Block = (*RIt)->getBlock();
1440 
1441  // If Block contains an original spill, simply continue.
1442  if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
1443  SpillsInSubTreeMap[*RIt].first.insert(*RIt);
1444  // SpillsInSubTreeMap[*RIt].second contains the cost of spill.
1445  SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
1446  continue;
1447  }
1448 
1449  // Collect spills in subtree of current node (*RIt) to
1450  // SpillsInSubTreeMap[*RIt].first.
1451  for (MachineDomTreeNode *Child : (*RIt)->children()) {
1452  if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
1453  continue;
1454  // The stmt "SpillsInSubTree = SpillsInSubTreeMap[*RIt].first" below
1455  // should be placed before getting the begin and end iterators of
1456  // SpillsInSubTreeMap[Child].first, or else the iterators may be
1457  // invalidated when SpillsInSubTreeMap[*RIt] is created for the first time
1458  // and the map grows, which moves the original buckets in the map.
1459  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1460  SpillsInSubTreeMap[*RIt].first;
1461  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1462  SubTreeCost += SpillsInSubTreeMap[Child].second;
1463  auto BI = SpillsInSubTreeMap[Child].first.begin();
1464  auto EI = SpillsInSubTreeMap[Child].first.end();
1465  SpillsInSubTree.insert(BI, EI);
1466  SpillsInSubTreeMap.erase(Child);
1467  }
1468 
1469  SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
1470  SpillsInSubTreeMap[*RIt].first;
1471  BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
1472  // No spills in subtree, simply continue.
1473  if (SpillsInSubTree.empty())
1474  continue;
1475 
1476  // Check whether Block is a possible candidate to insert spill.
1477  Register LiveReg;
1478  if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
1479  continue;
1480 
1481  // If there are multiple spills that could be merged, bias a little
1482  // to hoist the spill.
1483  BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
1484  ? BranchProbability(9, 10)
1485  : BranchProbability(1, 1);
1486  if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
1487  // Hoist: Move spills to current Block.
1488  for (const auto SpillBB : SpillsInSubTree) {
1489      // When SpillBB is a BB that contains an original spill, insert the
1490      // spill into SpillsToRm.
1491  if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
1492  !SpillsToKeep[SpillBB]) {
1493  MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
1494  SpillsToRm.push_back(SpillToRm);
1495  }
1496  // SpillBB will not contain spill anymore, remove it from SpillsToKeep.
1497  SpillsToKeep.erase(SpillBB);
1498  }
1499  // Current Block is the BB containing the new hoisted spill. Add it to
1500  // SpillsToKeep. LiveReg is the source of the new spill.
1501  SpillsToKeep[*RIt] = LiveReg;
1502  LLVM_DEBUG({
1503  dbgs() << "spills in BB: ";
1504  for (const auto Rspill : SpillsInSubTree)
1505  dbgs() << Rspill->getBlock()->getNumber() << " ";
1506  dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
1507  << "\n";
1508  });
1509  SpillsInSubTree.clear();
1510  SpillsInSubTree.insert(*RIt);
1511  SubTreeCost = MBFI.getBlockFreq(Block);
1512  }
1513  }
1514  // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),
1515  // save them to SpillsToIns.
1516  for (const auto &Ent : SpillsToKeep) {
1517  if (Ent.second)
1518  SpillsToIns[Ent.first->getBlock()] = Ent.second;
1519  }
1520 }
1521 
1522 /// For spills with equal values, remove redundant spills and hoist those left
1523 /// to less hot spots.
1524 ///
1525 /// Spills with equal values will be collected into the same set in
1526 /// MergeableSpills when a spill is inserted. These equal spills originate
1527 /// from the same defining instruction and are dominated by that instruction.
1528 /// Before hoisting all the equal spills, redundant spills inside the same
1529 /// BB are first marked to be deleted. Then starting from the spills left, walk
1530 /// up on the dominator tree towards the Root node where the define instruction
1531 /// is located, mark the dominated spills to be deleted along the way and
1532 /// collect the BB nodes on the path from non-dominated spills to the define
1533 /// instruction into a WorkSet. The nodes in WorkSet are the candidate places
1534 /// where we are considering hoisting the spills. We iterate the WorkSet in
1535 /// bottom-up order, and for each node, we will decide whether to hoist spills
1536 /// inside its subtree to that node. In this way, we can get benefit locally
1537 /// even if hoisting all the equal spills to one cold place is impossible.
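/// For example, if a def in BB0 dominates two hotter blocks BB1 and BB2 that
/// each spill the same value, and a live sibling register exists at BB0's
/// insert point, both spills can be replaced by a single spill hoisted into
/// the colder BB0, since the combined frequency of BB1 and BB2 outweighs
/// BB0's.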
1538 void HoistSpillHelper::hoistAllSpills() {
1539  SmallVector<Register, 4> NewVRegs;
1540  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
1541 
1542  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
1543  Register Reg = Register::index2VirtReg(i);
1544  Register Original = VRM.getPreSplitReg(Reg);
1545  if (!MRI.def_empty(Reg))
1546  Virt2SiblingsMap[Original].insert(Reg);
1547  }
1548 
1549  // Each entry in MergeableSpills contains a spill set with equal values.
1550  for (auto &Ent : MergeableSpills) {
1551  int Slot = Ent.first.first;
1552  LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
1553  VNInfo *OrigVNI = Ent.first.second;
1554  SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
1555  if (Ent.second.empty())
1556  continue;
1557 
1558  LLVM_DEBUG({
1559  dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
1560  << "Equal spills in BB: ";
1561  for (const auto spill : EqValSpills)
1562  dbgs() << spill->getParent()->getNumber() << " ";
1563  dbgs() << "\n";
1564  });
1565 
1566  // SpillsToRm is the spill set to be removed from EqValSpills.
1567  SmallVector<MachineInstr *, 16> SpillsToRm;
1568  // SpillsToIns is the spill set to be newly inserted after hoisting.
1569  DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;
1570 
1571  runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
1572 
1573  LLVM_DEBUG({
1574  dbgs() << "Finally inserted spills in BB: ";
1575  for (const auto &Ispill : SpillsToIns)
1576  dbgs() << Ispill.first->getNumber() << " ";
1577  dbgs() << "\nFinally removed spills in BB: ";
1578  for (const auto Rspill : SpillsToRm)
1579  dbgs() << Rspill->getParent()->getNumber() << " ";
1580  dbgs() << "\n";
1581  });
1582 
1583  // Stack live range update.
1584  LiveInterval &StackIntvl = LSS.getInterval(Slot);
1585  if (!SpillsToIns.empty() || !SpillsToRm.empty())
1586  StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
1587  StackIntvl.getValNumInfo(0));
1588 
1589  // Insert hoisted spills.
1590  for (auto const &Insert : SpillsToIns) {
1591  MachineBasicBlock *BB = Insert.first;
1592  Register LiveReg = Insert.second;
1593  MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
1594  MachineInstrSpan MIS(MII, BB);
1595  TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
1596  MRI.getRegClass(LiveReg), &TRI);
1597  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
1598  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
1599  getVDefInterval(MI, LIS);
1600  ++NumSpills;
1601  }
1602 
1603  // Remove redundant spills or change them to dead instructions.
1604  NumSpills -= SpillsToRm.size();
1605  for (auto const RMEnt : SpillsToRm) {
1606  RMEnt->setDesc(TII.get(TargetOpcode::KILL));
1607  for (unsigned i = RMEnt->getNumOperands(); i; --i) {
1608  MachineOperand &MO = RMEnt->getOperand(i - 1);
1609  if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
1610  RMEnt->removeOperand(i - 1);
1611  }
1612  }
1613  Edit.eliminateDeadDefs(SpillsToRm, None, AA);
1614  }
1615 }
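
// A simplified, self-contained sketch of the bottom-up subtree walk described
// in the comment above hoistAllSpills and implemented in runHoistSpills. The
// names below (Node, hoistBottomUp, HasSpill) are stand-ins, not LLVM API:
// each node absorbs the spill sets and costs of its children, and if a single
// spill at the node is cheaper than the spills accumulated in its subtree,
// the subtree's spills are replaced by one spill at the node. The 9/10 margin
// from the real code is omitted here for brevity.
#include <cstdint>
#include <map>
#include <set>
#include <vector>

struct Node {                       // stand-in for a MachineDomTreeNode
  int Id;
  uint64_t Freq;                    // stand-in for the MBFI block frequency
  std::vector<Node *> Children;
};

// BottomUpOrder must list every child before its parent. HasSpill holds the
// ids of nodes that originally carry a spill. Returns the ids of the nodes
// that should hold spills after hoisting.
static std::set<int> hoistBottomUp(const std::vector<Node *> &BottomUpOrder,
                                   const std::set<int> &HasSpill) {
  std::map<int, std::set<int>> SpillsInSubTree; // node id -> spill node ids
  std::map<int, uint64_t> SubTreeCost;          // node id -> accumulated cost

  for (Node *N : BottomUpOrder) {
    std::set<int> &Spills = SpillsInSubTree[N->Id];
    uint64_t &Cost = SubTreeCost[N->Id];
    if (HasSpill.count(N->Id)) {
      Spills.insert(N->Id);
      Cost += N->Freq;
    }
    // Merge the children's spill sets and costs into the current node.
    for (Node *C : N->Children) {
      Spills.insert(SpillsInSubTree[C->Id].begin(),
                    SpillsInSubTree[C->Id].end());
      Cost += SubTreeCost[C->Id];
      SpillsInSubTree.erase(C->Id);
      SubTreeCost.erase(C->Id);
    }
    if (Spills.empty())
      continue;
    // Hoist: one spill at N is cheaper than the spills below it.
    if (Cost > N->Freq) {
      Spills.clear();
      Spills.insert(N->Id);
      Cost = N->Freq;
    }
  }

  // Whatever remains at the unprocessed roots is the final placement.
  std::set<int> Result;
  for (const auto &Ent : SpillsInSubTree)
    Result.insert(Ent.second.begin(), Ent.second.end());
  return Result;
}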
1616 
1617 /// When a virtual register is cloned, the \p New register should receive the
1618 /// same physreg or stack slot assignment as the \p Old register.
1619 void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
1620  if (VRM.hasPhys(Old))
1621  VRM.assignVirt2Phys(New, VRM.getPhys(Old));
1622  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
1623  VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
1624  else
1625  llvm_unreachable("VReg should be assigned either physreg or stackslot");
1626  if (VRM.hasShape(Old))
1627  VRM.assignVirt2Shape(New, VRM.getShape(Old));
1628 }
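
// A tiny, self-contained sketch of the invariant LRE_DidCloneVirtReg enforces
// above: a register produced by cloning must inherit exactly the assignment
// of the original, whether that is a physical register or a stack slot. The
// struct and map below are illustrative stand-ins, not the VirtRegMap API.
#include <cassert>
#include <map>

struct Assignment {
  unsigned PhysReg = 0;          // 0 means "no physical register assigned"
  int StackSlot = -1;            // -1 mirrors VirtRegMap::NO_STACK_SLOT
};

static void didCloneVirtReg(std::map<unsigned, Assignment> &Map, unsigned New,
                            unsigned Old) {
  const Assignment &O = Map.at(Old);
  assert((O.PhysReg != 0 || O.StackSlot != -1) &&
         "VReg should be assigned either physreg or stackslot");
  Map[New] = O;                  // the clone inherits the old assignment
}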