LLVM 18.0.0git
RegAllocFast.cpp
Go to the documentation of this file.
1//===- RegAllocFast.cpp - A fast register allocator for debug code --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file This register allocator allocates registers to a basic block at a
10/// time, attempting to keep values in registers and reusing registers as
11/// appropriate.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/IndexedMap.h"
18#include "llvm/ADT/MapVector.h"
19#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/SparseSet.h"
22#include "llvm/ADT/Statistic.h"
40#include "llvm/Pass.h"
41#include "llvm/Support/Debug.h"
44#include <cassert>
45#include <tuple>
46#include <vector>
47
48using namespace llvm;
49
50#define DEBUG_TYPE "regalloc"
51
52STATISTIC(NumStores, "Number of stores added");
53STATISTIC(NumLoads , "Number of loads added");
54STATISTIC(NumCoalesced, "Number of copies coalesced");
55
56// FIXME: Remove this switch when all testcases are fixed!
57static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
59
61 fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
62
63namespace {
64
65 class RegAllocFast : public MachineFunctionPass {
66 public:
67 static char ID;
68
69 RegAllocFast(const RegClassFilterFunc F = allocateAllRegClasses,
70 bool ClearVirtRegs_ = true) :
72 ShouldAllocateClass(F),
73 StackSlotForVirtReg(-1),
74 ClearVirtRegs(ClearVirtRegs_) {
75 }
76
77 private:
78 MachineFrameInfo *MFI = nullptr;
79 MachineRegisterInfo *MRI = nullptr;
80 const TargetRegisterInfo *TRI = nullptr;
81 const TargetInstrInfo *TII = nullptr;
82 RegisterClassInfo RegClassInfo;
83 const RegClassFilterFunc ShouldAllocateClass;
84
85 /// Basic block currently being allocated.
86 MachineBasicBlock *MBB = nullptr;
87
88 /// Maps virtual regs to the frame index where these values are spilled.
89 IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
90
91 bool ClearVirtRegs;
92
93 /// Everything we know about a live virtual register.
94 struct LiveReg {
95 MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
96 Register VirtReg; ///< Virtual register number.
97 MCPhysReg PhysReg = 0; ///< Currently held here.
98 bool LiveOut = false; ///< Register is possibly live out.
99 bool Reloaded = false; ///< Register was reloaded.
100 bool Error = false; ///< Could not allocate.
101
102 explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
103
104 unsigned getSparseSetIndex() const {
105 return Register::virtReg2Index(VirtReg);
106 }
107 };
108
110 /// This map contains entries for each virtual register that is currently
111 /// available in a physical register.
112 LiveRegMap LiveVirtRegs;
113
114 /// Stores assigned virtual registers present in the bundle MI.
115 DenseMap<Register, MCPhysReg> BundleVirtRegsMap;
116
118 /// List of DBG_VALUE that we encountered without the vreg being assigned
119 /// because they were placed after the last use of the vreg.
121
122 /// Has a bit set for every virtual register for which it was determined
123 /// that it is alive across blocks.
124 BitVector MayLiveAcrossBlocks;
125
126 /// State of a register unit.
127 enum RegUnitState {
128 /// A free register is not currently in use and can be allocated
129 /// immediately without checking aliases.
130 regFree,
131
132 /// A pre-assigned register has been assigned before register allocation
133 /// (e.g., setting up a call parameter).
134 regPreAssigned,
135
136 /// Used temporarily in reloadAtBegin() to mark register units that are
137 /// live-in to the basic block.
138 regLiveIn,
139
140 /// A register state may also be a virtual register number, indication
141 /// that the physical register is currently allocated to a virtual
142 /// register. In that case, LiveVirtRegs contains the inverse mapping.
143 };
144
145 /// Maps each physical register to a RegUnitState enum or virtual register.
146 std::vector<unsigned> RegUnitStates;
147
149
150 using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
151 /// Set of register units that are used in the current instruction, and so
152 /// cannot be allocated.
153 RegUnitSet UsedInInstr;
154 RegUnitSet PhysRegUses;
155 SmallVector<uint16_t, 8> DefOperandIndexes;
156 // Register masks attached to the current instruction.
158
159 void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
160 bool isPhysRegFree(MCPhysReg PhysReg) const;
161
162 /// Mark a physreg as used in this instruction.
163 void markRegUsedInInstr(MCPhysReg PhysReg) {
164 for (MCRegUnit Unit : TRI->regunits(PhysReg))
165 UsedInInstr.insert(Unit);
166 }
167
168 // Check if physreg is clobbered by instruction's regmask(s).
169 bool isClobberedByRegMasks(MCPhysReg PhysReg) const {
170 return llvm::any_of(RegMasks, [PhysReg](const uint32_t *Mask) {
171 return MachineOperand::clobbersPhysReg(Mask, PhysReg);
172 });
173 }
174
175 /// Check if a physreg or any of its aliases are used in this instruction.
176 bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
177 if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg))
178 return true;
179 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
180 if (UsedInInstr.count(Unit))
181 return true;
182 if (LookAtPhysRegUses && PhysRegUses.count(Unit))
183 return true;
184 }
185 return false;
186 }
187
188 /// Mark physical register as being used in a register use operand.
189 /// This is only used by the special livethrough handling code.
190 void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
191 for (MCRegUnit Unit : TRI->regunits(PhysReg))
192 PhysRegUses.insert(Unit);
193 }
194
195 /// Remove mark of physical register being used in the instruction.
196 void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
197 for (MCRegUnit Unit : TRI->regunits(PhysReg))
198 UsedInInstr.erase(Unit);
199 }
200
201 enum : unsigned {
202 spillClean = 50,
203 spillDirty = 100,
204 spillPrefBonus = 20,
205 spillImpossible = ~0u
206 };
207
208 public:
209 StringRef getPassName() const override { return "Fast Register Allocator"; }
210
211 void getAnalysisUsage(AnalysisUsage &AU) const override {
212 AU.setPreservesCFG();
214 }
215
218 MachineFunctionProperties::Property::NoPHIs);
219 }
220
222 if (ClearVirtRegs) {
224 MachineFunctionProperties::Property::NoVRegs);
225 }
226
228 }
229
232 MachineFunctionProperties::Property::IsSSA);
233 }
234
235 private:
236 bool runOnMachineFunction(MachineFunction &MF) override;
237
238 void allocateBasicBlock(MachineBasicBlock &MBB);
239
240 void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
241 Register Reg) const;
242
243 void findAndSortDefOperandIndexes(const MachineInstr &MI);
244
245 void allocateInstruction(MachineInstr &MI);
246 void handleDebugValue(MachineInstr &MI);
247 void handleBundle(MachineInstr &MI);
248
249 bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
250 bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
251 bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
252 void freePhysReg(MCPhysReg PhysReg);
253
254 unsigned calcSpillCost(MCPhysReg PhysReg) const;
255
256 LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
257 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
258 }
259
260 LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
261 return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
262 }
263
264 void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
265 void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
266 bool LookAtPhysRegUses = false);
267 void allocVirtRegUndef(MachineOperand &MO);
268 void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
269 MCPhysReg Reg);
270 bool defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
271 Register VirtReg);
272 bool defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
273 bool LookAtPhysRegUses = false);
274 bool useVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg);
275
277 getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
278 SmallSet<Register, 2> &PrologLiveIns) const;
279
280 void reloadAtBegin(MachineBasicBlock &MBB);
281 bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
282
283 Register traceCopies(Register VirtReg) const;
284 Register traceCopyChain(Register Reg) const;
285
286 bool shouldAllocateRegister(const Register Reg) const;
287 int getStackSpaceFor(Register VirtReg);
288 void spill(MachineBasicBlock::iterator Before, Register VirtReg,
289 MCPhysReg AssignedReg, bool Kill, bool LiveOut);
290 void reload(MachineBasicBlock::iterator Before, Register VirtReg,
291 MCPhysReg PhysReg);
292
293 bool mayLiveOut(Register VirtReg);
294 bool mayLiveIn(Register VirtReg);
295
296 void dumpState() const;
297 };
298
299} // end anonymous namespace
300
// Static pass identification; the pass framework keys off the address of ID.
301char RegAllocFast::ID = 0;
302
// Register the pass with the legacy PassManager under the name "regallocfast".
303INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
304 false)
305
306bool RegAllocFast::shouldAllocateRegister(const Register Reg) const {
307 assert(Reg.isVirtual());
308 const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
309 return ShouldAllocateClass(*TRI, RC);
310}
311
312void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
313 for (MCRegUnit Unit : TRI->regunits(PhysReg))
314 RegUnitStates[Unit] = NewState;
315}
316
317bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
318 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
319 if (RegUnitStates[Unit] != regFree)
320 return false;
321 }
322 return true;
323}
324
325/// This allocates space for the specified virtual register to be held on the
326/// stack.
327int RegAllocFast::getStackSpaceFor(Register VirtReg) {
328 // Find the location Reg would belong...
329 int SS = StackSlotForVirtReg[VirtReg];
330 // Already has space allocated?
331 if (SS != -1)
332 return SS;
333
334 // Allocate a new stack object for this spill location...
335 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
336 unsigned Size = TRI->getSpillSize(RC);
337 Align Alignment = TRI->getSpillAlign(RC);
338 int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);
339
340 // Assign the slot.
341 StackSlotForVirtReg[VirtReg] = FrameIdx;
342 return FrameIdx;
343}
344
348 auto MBBEnd = MBB.end();
349 if (B == MBBEnd)
350 return true;
351
353 for (; &*I != A && &*I != B; ++I)
354 ;
355
356 return &*I == A;
357}
358
359/// Returns false if \p VirtReg is known to not live out of the current block.
360bool RegAllocFast::mayLiveOut(Register VirtReg) {
361 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
362 // Cannot be live-out if there are no successors.
363 return !MBB->succ_empty();
364 }
365
366 const MachineInstr *SelfLoopDef = nullptr;
367
368 // If this block loops back to itself, it is necessary to check whether the
369 // use comes after the def.
370 if (MBB->isSuccessor(MBB)) {
371 // Find the first def in the self loop MBB.
372 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
373 if (DefInst.getParent() != MBB) {
374 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
375 return true;
376 } else {
377 if (!SelfLoopDef || dominates(*MBB, DefInst.getIterator(), SelfLoopDef))
378 SelfLoopDef = &DefInst;
379 }
380 }
381 if (!SelfLoopDef) {
382 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
383 return true;
384 }
385 }
386
387 // See if the first \p Limit uses of the register are all in the current
388 // block.
389 static const unsigned Limit = 8;
390 unsigned C = 0;
391 for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
392 if (UseInst.getParent() != MBB || ++C >= Limit) {
393 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
394 // Cannot be live-out if there are no successors.
395 return !MBB->succ_empty();
396 }
397
398 if (SelfLoopDef) {
399 // Try to handle some simple cases to avoid spilling and reloading every
400 // value inside a self looping block.
401 if (SelfLoopDef == &UseInst ||
402 !dominates(*MBB, SelfLoopDef->getIterator(), UseInst.getIterator())) {
403 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
404 return true;
405 }
406 }
407 }
408
409 return false;
410}
411
412/// Returns false if \p VirtReg is known to not be live into the current block.
413bool RegAllocFast::mayLiveIn(Register VirtReg) {
414 if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
415 return !MBB->pred_empty();
416
417 // See if the first \p Limit def of the register are all in the current block.
418 static const unsigned Limit = 8;
419 unsigned C = 0;
420 for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
421 if (DefInst.getParent() != MBB || ++C >= Limit) {
422 MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
423 return !MBB->pred_empty();
424 }
425 }
426
427 return false;
428}
429
430/// Insert spill instruction for \p AssignedReg before \p Before. Update
431/// DBG_VALUEs with \p VirtReg operands with the stack slot.
432void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
433 MCPhysReg AssignedReg, bool Kill, bool LiveOut) {
434 LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI)
435 << " in " << printReg(AssignedReg, TRI));
// Get (or lazily create) the stack slot backing this virtual register.
436 int FI = getStackSpaceFor(VirtReg);
437 LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
438
// Emit the actual store into the slot and count it for -stats.
439 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
440 TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI,
441 VirtReg);
442 ++NumStores;
443
// NOTE(review): original source line 444 is missing from this extraction; it
// declared `FirstTerm` (the block's first terminator, used below when cloning
// the DBG_VALUE) -- restore from upstream before building.
445
446 // When we spill a virtual register, we will have spill instructions behind
447 // every definition of it, meaning we can switch all the DBG_VALUEs over
448 // to just reference the stack slot.
449 SmallVectorImpl<MachineOperand *> &LRIDbgOperands = LiveDbgValueMap[VirtReg];
// NOTE(review): original source line 450 is missing from this extraction; it
// declared `SpilledOperandsMap` (a map from DBG_VALUE instruction to the
// operands in it that refer to VirtReg) -- restore from upstream.
451 SpilledOperandsMap;
452 for (MachineOperand *MO : LRIDbgOperands)
453 SpilledOperandsMap[MO->getParent()].push_back(MO);
// Rewrite each DBG_VALUE that refers to VirtReg so it points at the slot.
454 for (auto MISpilledOperands : SpilledOperandsMap) {
455 MachineInstr &DBG = *MISpilledOperands.first;
456 // We don't have enough support for tracking operands of DBG_VALUE_LISTs.
457 if (DBG.isDebugValueList())
458 continue;
// NOTE(review): original source line 459 is missing from this extraction; it
// declared `NewDV` via a call to buildDbgValueForSpill(...) whose argument
// list continues on the next line -- restore from upstream.
460 *MBB, Before, *MISpilledOperands.first, FI, MISpilledOperands.second);
461 assert(NewDV->getParent() == MBB && "dangling parent pointer");
462 (void)NewDV;
463 LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
464
465 if (LiveOut) {
466 // We need to insert a DBG_VALUE at the end of the block if the spill slot
467 // is live out, but there is another use of the value after the
468 // spill. This will allow LiveDebugValues to see the correct live out
469 // value to propagate to the successors.
470 MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
471 MBB->insert(FirstTerm, ClonedDV);
472 LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
473 }
474
475 // Rewrite unassigned dbg_values to use the stack slot.
476 // TODO We can potentially do this for list debug values as well if we know
477 // how the dbg_values are getting unassigned.
478 if (DBG.isNonListDebugValue()) {
479 MachineOperand &MO = DBG.getDebugOperand(0);
480 if (MO.isReg() && MO.getReg() == 0) {
481 updateDbgValueForSpill(DBG, FI, 0);
482 }
483 }
484 }
485 // Now that this register is spilled, there should not be any DBG_VALUE
486 // pointing to this register because they all point to the spilled value
487 // now.
488 LRIDbgOperands.clear();
489}
490
491/// Insert reload instruction for \p PhysReg before \p Before.
492void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
493 MCPhysReg PhysReg) {
494 LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
495 << printReg(PhysReg, TRI) << '\n');
496 int FI = getStackSpaceFor(VirtReg);
497 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
498 TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI, VirtReg);
499 ++NumLoads;
500}
501
502/// Get basic block begin insertion point.
503/// This is not just MBB.begin() because surprisingly we have EH_LABEL
504/// instructions marking the begin of a basic block. This means we must insert
505/// new instructions after such labels...
// NOTE(review): original source line 506 is missing from this extraction; it
// carried the function's return type (a MachineBasicBlock::iterator) --
// restore from upstream.
507RegAllocFast::getMBBBeginInsertionPoint(
508 MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
// NOTE(review): original source line 509 is missing from this extraction; it
// initialized the cursor `I`, presumably
// `MachineBasicBlock::iterator I = MBB.begin();` -- restore from upstream.
510 while (I != MBB.end()) {
// Skip over labels (e.g. EH_LABEL) that mark the beginning of the block.
511 if (I->isLabel()) {
512 ++I;
513 continue;
514 }
515
516 // Most reloads should be inserted after prolog instructions.
517 if (!TII->isBasicBlockPrologue(*I))
518 break;
519
520 // However if a prolog instruction reads a register that needs to be
521 // reloaded, the reload should be inserted before the prolog.
522 for (MachineOperand &MO : I->operands()) {
523 if (MO.isReg())
524 PrologLiveIns.insert(MO.getReg());
525 }
526
527 ++I;
528 }
529
530 return I;
531}
532
533/// Reload all currently assigned virtual registers.
534void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
535 if (LiveVirtRegs.empty())
536 return;
537
// NOTE(review): original source line 538 is missing from this extraction; it
// opened the loop over the live registers, presumably
// `for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {` or a loop
// binding `P` -- restore from upstream.
539 MCPhysReg Reg = P.PhysReg;
540 // Set state to live-in. This possibly overrides mappings to virtual
541 // registers but we don't care anymore at this point.
542 setPhysRegState(Reg, regLiveIn);
543 }
544
545
546 SmallSet<Register, 2> PrologLiveIns;
547
548 // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
549 // of spilling here is deterministic, if arbitrary.
550 MachineBasicBlock::iterator InsertBefore
551 = getMBBBeginInsertionPoint(MBB, PrologLiveIns);
552 for (const LiveReg &LR : LiveVirtRegs) {
553 MCPhysReg PhysReg = LR.PhysReg;
// An unassigned live register has nowhere to be reloaded into.
554 if (PhysReg == 0)
555 continue;
556
// Units marked regLiveIn above are already live on entry; no reload needed.
557 MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
558 if (RegUnitStates[FirstUnit] == regLiveIn)
559 continue;
560
// NOTE(review): original source line 561 is missing from this extraction; it
// held the first half of this assert expression -- restore from upstream.
562 "no reload in start block. Missing vreg def?");
563
564 if (PrologLiveIns.count(PhysReg)) {
565 // FIXME: Theoretically this should use an insert point skipping labels
566 // but I'm not sure how labels should interact with prolog instruction
567 // that need reloads.
568 reload(MBB.begin(), LR.VirtReg, PhysReg);
569 } else
570 reload(InsertBefore, LR.VirtReg, PhysReg);
571 }
572 LiveVirtRegs.clear();
573}
574
575/// Handle the direct use of a physical register. Check that the register is
576/// not used by a virtreg. Kill the physreg, marking it free. This may add
577/// implicit kills to MO->getParent() and invalidate MO.
578bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
579 assert(Register::isPhysicalRegister(Reg) && "expected physreg");
580 bool displacedAny = displacePhysReg(MI, Reg);
581 setPhysRegState(Reg, regPreAssigned);
582 markRegUsedInInstr(Reg);
583 return displacedAny;
584}
585
586bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
587 bool displacedAny = displacePhysReg(MI, Reg);
588 setPhysRegState(Reg, regPreAssigned);
589 return displacedAny;
590}
591
592/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
593/// similar to defineVirtReg except the physreg is reserved instead of
594/// allocated.
595bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
596 bool displacedAny = false;
597
598 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
599 switch (unsigned VirtReg = RegUnitStates[Unit]) {
600 default: {
601 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
602 assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
603 MachineBasicBlock::iterator ReloadBefore =
604 std::next((MachineBasicBlock::iterator)MI.getIterator());
605 reload(ReloadBefore, VirtReg, LRI->PhysReg);
606
607 setPhysRegState(LRI->PhysReg, regFree);
608 LRI->PhysReg = 0;
609 LRI->Reloaded = true;
610 displacedAny = true;
611 break;
612 }
613 case regPreAssigned:
614 RegUnitStates[Unit] = regFree;
615 displacedAny = true;
616 break;
617 case regFree:
618 break;
619 }
620 }
621 return displacedAny;
622}
623
624void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
625 LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
626
627 MCRegister FirstUnit = *TRI->regunits(PhysReg).begin();
628 switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
629 case regFree:
630 LLVM_DEBUG(dbgs() << '\n');
631 return;
632 case regPreAssigned:
633 LLVM_DEBUG(dbgs() << '\n');
634 setPhysRegState(PhysReg, regFree);
635 return;
636 default: {
637 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
638 assert(LRI != LiveVirtRegs.end());
639 LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
640 setPhysRegState(LRI->PhysReg, regFree);
641 LRI->PhysReg = 0;
642 }
643 return;
644 }
645}
646
647/// Return the cost of spilling clearing out PhysReg and aliases so it is free
648/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases
649/// disabled - it can be allocated directly.
650/// \returns spillImpossible when PhysReg or an alias can't be spilled.
651unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
652 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
653 switch (unsigned VirtReg = RegUnitStates[Unit]) {
654 case regFree:
655 break;
656 case regPreAssigned:
657 LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
658 << printReg(PhysReg, TRI) << '\n');
659 return spillImpossible;
660 default: {
661 bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
662 findLiveVirtReg(VirtReg)->LiveOut;
663 return SureSpill ? spillClean : spillDirty;
664 }
665 }
666 }
667 return 0;
668}
669
670void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition,
671 Register VirtReg, MCPhysReg Reg) {
672 auto UDBGValIter = DanglingDbgValues.find(VirtReg);
673 if (UDBGValIter == DanglingDbgValues.end())
674 return;
675
676 SmallVectorImpl<MachineInstr*> &Dangling = UDBGValIter->second;
677 for (MachineInstr *DbgValue : Dangling) {
678 assert(DbgValue->isDebugValue());
679 if (!DbgValue->hasDebugOperandForReg(VirtReg))
680 continue;
681
682 // Test whether the physreg survives from the definition to the DBG_VALUE.
683 MCPhysReg SetToReg = Reg;
684 unsigned Limit = 20;
685 for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
686 E = DbgValue->getIterator(); I != E; ++I) {
687 if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
688 LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
689 << '\n');
690 SetToReg = 0;
691 break;
692 }
693 }
694 for (MachineOperand &MO : DbgValue->getDebugOperandsForReg(VirtReg)) {
695 MO.setReg(SetToReg);
696 if (SetToReg != 0)
697 MO.setIsRenamable();
698 }
699 }
700 Dangling.clear();
701}
702
703/// This method updates local state so that we know that PhysReg is the
704/// proper container for VirtReg now. The physical register must not be used
705/// for anything else when this is called.
706void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
707 MCPhysReg PhysReg) {
708 Register VirtReg = LR.VirtReg;
709 LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
710 << printReg(PhysReg, TRI) << '\n');
711 assert(LR.PhysReg == 0 && "Already assigned a physreg");
712 assert(PhysReg != 0 && "Trying to assign no register");
713 LR.PhysReg = PhysReg;
714 setPhysRegState(PhysReg, VirtReg);
715
716 assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
717}
718
719static bool isCoalescable(const MachineInstr &MI) {
720 return MI.isFullCopy();
721}
722
723Register RegAllocFast::traceCopyChain(Register Reg) const {
724 static const unsigned ChainLengthLimit = 3;
725 unsigned C = 0;
726 do {
727 if (Reg.isPhysical())
728 return Reg;
729 assert(Reg.isVirtual());
730
731 MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
732 if (!VRegDef || !isCoalescable(*VRegDef))
733 return 0;
734 Reg = VRegDef->getOperand(1).getReg();
735 } while (++C <= ChainLengthLimit);
736 return 0;
737}
738
739/// Check if any of \p VirtReg's definitions is a copy. If it is follow the
740/// chain of copies to check whether we reach a physical register we can
741/// coalesce with.
742Register RegAllocFast::traceCopies(Register VirtReg) const {
743 static const unsigned DefLimit = 3;
744 unsigned C = 0;
745 for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
746 if (isCoalescable(MI)) {
747 Register Reg = MI.getOperand(1).getReg();
748 Reg = traceCopyChain(Reg);
749 if (Reg.isValid())
750 return Reg;
751 }
752
753 if (++C >= DefLimit)
754 break;
755 }
756 return Register();
757}
758
759/// Allocates a physical register for VirtReg.
760void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
761 Register Hint0, bool LookAtPhysRegUses) {
762 const Register VirtReg = LR.VirtReg;
763 assert(LR.PhysReg == 0);
764
765 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
766 LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
767 << " in class " << TRI->getRegClassName(&RC)
768 << " with hint " << printReg(Hint0, TRI) << '\n');
769
770 // Take hint when possible.
771 if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
772 !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
773 // Take hint if the register is currently free.
774 if (isPhysRegFree(Hint0)) {
775 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
776 << '\n');
777 assignVirtToPhysReg(MI, LR, Hint0);
778 return;
779 } else {
780 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
781 << " occupied\n");
782 }
783 } else {
784 Hint0 = Register();
785 }
786
787
788 // Try other hint.
789 Register Hint1 = traceCopies(VirtReg);
790 if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
791 !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
792 // Take hint if the register is currently free.
793 if (isPhysRegFree(Hint1)) {
794 LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
795 << '\n');
796 assignVirtToPhysReg(MI, LR, Hint1);
797 return;
798 } else {
799 LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
800 << " occupied\n");
801 }
802 } else {
803 Hint1 = Register();
804 }
805
806 MCPhysReg BestReg = 0;
807 unsigned BestCost = spillImpossible;
808 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
809 for (MCPhysReg PhysReg : AllocationOrder) {
810 LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
811 if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
812 LLVM_DEBUG(dbgs() << "already used in instr.\n");
813 continue;
814 }
815
816 unsigned Cost = calcSpillCost(PhysReg);
817 LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
818 // Immediate take a register with cost 0.
819 if (Cost == 0) {
820 assignVirtToPhysReg(MI, LR, PhysReg);
821 return;
822 }
823
824 if (PhysReg == Hint0 || PhysReg == Hint1)
825 Cost -= spillPrefBonus;
826
827 if (Cost < BestCost) {
828 BestReg = PhysReg;
829 BestCost = Cost;
830 }
831 }
832
833 if (!BestReg) {
834 // Nothing we can do: Report an error and keep going with an invalid
835 // allocation.
836 if (MI.isInlineAsm())
837 MI.emitError("inline assembly requires more registers than available");
838 else
839 MI.emitError("ran out of registers during register allocation");
840
841 LR.Error = true;
842 LR.PhysReg = 0;
843 return;
844 }
845
846 displacePhysReg(MI, BestReg);
847 assignVirtToPhysReg(MI, LR, BestReg);
848}
849
850void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
851 assert(MO.isUndef() && "expected undef use");
852 Register VirtReg = MO.getReg();
853 assert(VirtReg.isVirtual() && "Expected virtreg");
854 if (!shouldAllocateRegister(VirtReg))
855 return;
856
857 LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
858 MCPhysReg PhysReg;
859 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
860 PhysReg = LRI->PhysReg;
861 } else {
862 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
863 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
864 assert(!AllocationOrder.empty() && "Allocation order must not be empty");
865 PhysReg = AllocationOrder[0];
866 }
867
868 unsigned SubRegIdx = MO.getSubReg();
869 if (SubRegIdx != 0) {
870 PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
871 MO.setSubReg(0);
872 }
873 MO.setReg(PhysReg);
874 MO.setIsRenamable(true);
875}
876
877/// Variation of defineVirtReg() with special handling for livethrough regs
878/// (tied or earlyclobber) that may interfere with preassigned uses.
879/// \return true if MI's MachineOperands were re-arranged/invalidated.
880bool RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
881 Register VirtReg) {
882 if (!shouldAllocateRegister(VirtReg))
883 return false;
884 LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
885 if (LRI != LiveVirtRegs.end()) {
886 MCPhysReg PrevReg = LRI->PhysReg;
887 if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
888 LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
889 << " (tied/earlyclobber resolution)\n");
890 freePhysReg(PrevReg);
891 LRI->PhysReg = 0;
892 allocVirtReg(MI, *LRI, 0, true);
893 MachineBasicBlock::iterator InsertBefore =
894 std::next((MachineBasicBlock::iterator)MI.getIterator());
895 LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
896 << printReg(PrevReg, TRI) << '\n');
897 BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
898 TII->get(TargetOpcode::COPY), PrevReg)
899 .addReg(LRI->PhysReg, llvm::RegState::Kill);
900 }
901 MachineOperand &MO = MI.getOperand(OpNum);
902 if (MO.getSubReg() && !MO.isUndef()) {
903 LRI->LastUse = &MI;
904 }
905 }
906 return defineVirtReg(MI, OpNum, VirtReg, true);
907}
908
909/// Allocates a register for VirtReg definition. Typically the register is
910/// already assigned from a use of the virtreg, however we still need to
911/// perform an allocation if:
912/// - It is a dead definition without any uses.
913/// - The value is live out and all uses are in different basic blocks.
914///
915/// \return true if MI's MachineOperands were re-arranged/invalidated.
bool RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                 Register VirtReg, bool LookAtPhysRegUses) {
  assert(VirtReg.isVirtual() && "Not a virtual register");
  // Vregs filtered out by the allocation-class predicate are left untouched.
  if (!shouldAllocateRegister(VirtReg))
    return false;
  MachineOperand &MO = MI.getOperand(OpNum);
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (New) {
    // First sighting of this vreg. Blocks are allocated bottom-up (see
    // allocateBasicBlock), so a def seen before any use means the value is
    // either dead or only read in other blocks.
    if (!MO.isDead()) {
      if (mayLiveOut(VirtReg)) {
        LRI->LiveOut = true;
      } else {
        // It is a dead def without the dead flag; add the flag now.
        MO.setIsDead(true);
      }
    }
  }
  if (LRI->PhysReg == 0) {
    allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
    // If no physical register is available for LRI, we assign one at random
    // and bail out of this function immediately.
    if (LRI->Error) {
      const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
      ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
      if (AllocationOrder.empty())
        return setPhysReg(MI, MO, MCRegister::NoRegister);
      return setPhysReg(MI, MO, *AllocationOrder.begin());
    }
  } else {
    assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
           "TODO: preassign mismatch");
    LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
                      << " use existing assignment to "
                      << printReg(LRI->PhysReg, TRI) << '\n');
  }

  MCPhysReg PhysReg = LRI->PhysReg;
  if (LRI->Reloaded || LRI->LiveOut) {
    if (!MI.isImplicitDef()) {
      // The value was reloaded further down the block or escapes the block:
      // store it to its stack slot directly after this instruction.
      MachineBasicBlock::iterator SpillBefore =
          std::next((MachineBasicBlock::iterator)MI.getIterator());
      LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut << " RL: "
                        << LRI->Reloaded << '\n');
      bool Kill = LRI->LastUse == nullptr;
      spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);

      // We need to place additional spills for each indirect destination of an
      // INLINEASM_BR.
      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
        int FI = StackSlotForVirtReg[VirtReg];
        const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
        for (MachineOperand &MO : MI.operands()) {
          if (MO.isMBB()) {
            MachineBasicBlock *Succ = MO.getMBB();
            TII->storeRegToStackSlot(*Succ, Succ->begin(), PhysReg, Kill,
                                     FI, &RC, TRI, VirtReg);
            ++NumStores;
            Succ->addLiveIn(PhysReg);
          }
        }
      }

      LRI->LastUse = nullptr;
    }
    // The spill (or implicit-def) satisfied the pending liveness; reset.
    LRI->LiveOut = false;
    LRI->Reloaded = false;
  }
  if (MI.getOpcode() == TargetOpcode::BUNDLE) {
    BundleVirtRegsMap[VirtReg] = PhysReg;
  }
  markRegUsedInInstr(PhysReg);
  return setPhysReg(MI, MO, PhysReg);
}
991
992/// Allocates a register for a VirtReg use.
993/// \return true if MI's MachineOperands were re-arranged/invalidated.
994bool RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum,
995 Register VirtReg) {
996 assert(VirtReg.isVirtual() && "Not a virtual register");
997 if (!shouldAllocateRegister(VirtReg))
998 return false;
999 MachineOperand &MO = MI.getOperand(OpNum);
1000 LiveRegMap::iterator LRI;
1001 bool New;
1002 std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
1003 if (New) {
1004 MachineOperand &MO = MI.getOperand(OpNum);
1005 if (!MO.isKill()) {
1006 if (mayLiveOut(VirtReg)) {
1007 LRI->LiveOut = true;
1008 } else {
1009 // It is a last (killing) use without the kill flag; add the flag now.
1010 MO.setIsKill(true);
1011 }
1012 }
1013 } else {
1014 assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
1015 }
1016
1017 // If necessary allocate a register.
1018 if (LRI->PhysReg == 0) {
1019 assert(!MO.isTied() && "tied op should be allocated");
1020 Register Hint;
1021 if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
1022 Hint = MI.getOperand(0).getReg();
1023 if (Hint.isVirtual()) {
1024 assert(!shouldAllocateRegister(Hint));
1025 Hint = Register();
1026 } else {
1027 assert(Hint.isPhysical() &&
1028 "Copy destination should already be assigned");
1029 }
1030 }
1031 allocVirtReg(MI, *LRI, Hint, false);
1032 if (LRI->Error) {
1033 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1034 ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
1035 if (AllocationOrder.empty())
1036 return setPhysReg(MI, MO, MCRegister::NoRegister);
1037 return setPhysReg(MI, MO, *AllocationOrder.begin());
1038 }
1039 }
1040
1041 LRI->LastUse = &MI;
1042
1043 if (MI.getOpcode() == TargetOpcode::BUNDLE) {
1044 BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
1045 }
1046 markRegUsedInInstr(LRI->PhysReg);
1047 return setPhysReg(MI, MO, LRI->PhysReg);
1048}
1049
1050/// Changes operand OpNum in MI the refer the PhysReg, considering subregs.
1051/// \return true if MI's MachineOperands were re-arranged/invalidated.
1052bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
1053 MCPhysReg PhysReg) {
1054 if (!MO.getSubReg()) {
1055 MO.setReg(PhysReg);
1056 MO.setIsRenamable(true);
1057 return false;
1058 }
1059
1060 // Handle subregister index.
1061 MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
1062 MO.setIsRenamable(true);
1063 // Note: We leave the subreg number around a little longer in case of defs.
1064 // This is so that the register freeing logic in allocateInstruction can still
1065 // recognize this as subregister defs. The code there will clear the number.
1066 if (!MO.isDef())
1067 MO.setSubReg(0);
1068
1069 // A kill flag implies killing the full register. Add corresponding super
1070 // register kill.
1071 if (MO.isKill()) {
1072 MI.addRegisterKilled(PhysReg, TRI, true);
1073 // Conservatively assume implicit MOs were re-arranged
1074 return true;
1075 }
1076
1077 // A <def,read-undef> of a sub-register requires an implicit def of the full
1078 // register.
1079 if (MO.isDef() && MO.isUndef()) {
1080 if (MO.isDead())
1081 MI.addRegisterDead(PhysReg, TRI, true);
1082 else
1083 MI.addRegisterDefined(PhysReg, TRI);
1084 // Conservatively assume implicit MOs were re-arranged
1085 return true;
1086 }
1087 return false;
1088}
1089
1090#ifndef NDEBUG
1091
1092void RegAllocFast::dumpState() const {
1093 for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
1094 ++Unit) {
1095 switch (unsigned VirtReg = RegUnitStates[Unit]) {
1096 case regFree:
1097 break;
1098 case regPreAssigned:
1099 dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
1100 break;
1101 case regLiveIn:
1102 llvm_unreachable("Should not have regLiveIn in map");
1103 default: {
1104 dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
1105 LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
1106 assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
1107 if (I->LiveOut || I->Reloaded) {
1108 dbgs() << '[';
1109 if (I->LiveOut) dbgs() << 'O';
1110 if (I->Reloaded) dbgs() << 'R';
1111 dbgs() << ']';
1112 }
1113 assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
1114 break;
1115 }
1116 }
1117 }
1118 dbgs() << '\n';
1119 // Check that LiveVirtRegs is the inverse.
1120 for (const LiveReg &LR : LiveVirtRegs) {
1121 Register VirtReg = LR.VirtReg;
1122 assert(VirtReg.isVirtual() && "Bad map key");
1123 MCPhysReg PhysReg = LR.PhysReg;
1124 if (PhysReg != 0) {
1126 "mapped to physreg");
1127 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
1128 assert(RegUnitStates[Unit] == VirtReg && "inverse map valid");
1129 }
1130 }
1131 }
1132}
1133#endif
1134
1135/// Count number of defs consumed from each register class by \p Reg
1136void RegAllocFast::addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
1137 Register Reg) const {
1138 assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
1139
1140 if (Reg.isVirtual()) {
1141 if (!shouldAllocateRegister(Reg))
1142 return;
1143 const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
1144 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1145 RCIdx != RCIdxEnd; ++RCIdx) {
1146 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1147 // FIXME: Consider aliasing sub/super registers.
1148 if (OpRC->hasSubClassEq(IdxRC))
1149 ++RegClassDefCounts[RCIdx];
1150 }
1151
1152 return;
1153 }
1154
1155 for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
1156 RCIdx != RCIdxEnd; ++RCIdx) {
1157 const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
1158 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
1159 if (IdxRC->contains(*Alias)) {
1160 ++RegClassDefCounts[RCIdx];
1161 break;
1162 }
1163 }
1164 }
1165}
1166
1167/// Compute \ref DefOperandIndexes so it contains the indices of "def" operands
1168/// that are to be allocated. Those are ordered in a way that small classes,
1169/// early clobbers and livethroughs are allocated first.
1170void RegAllocFast::findAndSortDefOperandIndexes(const MachineInstr &MI) {
1171 DefOperandIndexes.clear();
1172
1173 // Track number of defs which may consume a register from the class.
1174 std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);
1175 assert(RegClassDefCounts[0] == 0);
1176
1177 LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
1178 for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
1179 const MachineOperand &MO = MI.getOperand(I);
1180 if (!MO.isReg())
1181 continue;
1182 Register Reg = MO.getReg();
1183 if (MO.readsReg()) {
1184 if (Reg.isPhysical()) {
1185 LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI) << '\n');
1186 markPhysRegUsedInInstr(Reg);
1187 }
1188 }
1189
1190 if (MO.isDef()) {
1191 if (Reg.isVirtual() && shouldAllocateRegister(Reg))
1192 DefOperandIndexes.push_back(I);
1193
1194 addRegClassDefCounts(RegClassDefCounts, Reg);
1195 }
1196 }
1197
1198 llvm::sort(DefOperandIndexes, [&](uint16_t I0, uint16_t I1) {
1199 const MachineOperand &MO0 = MI.getOperand(I0);
1200 const MachineOperand &MO1 = MI.getOperand(I1);
1201 Register Reg0 = MO0.getReg();
1202 Register Reg1 = MO1.getReg();
1203 const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
1204 const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);
1205
1206 // Identify regclass that are easy to use up completely just in this
1207 // instruction.
1208 unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
1209 unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();
1210
1211 bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
1212 bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
1213 if (SmallClass0 > SmallClass1)
1214 return true;
1215 if (SmallClass0 < SmallClass1)
1216 return false;
1217
1218 // Allocate early clobbers and livethrough operands first.
1219 bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
1220 (MO0.getSubReg() == 0 && !MO0.isUndef());
1221 bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
1222 (MO1.getSubReg() == 0 && !MO1.isUndef());
1223 if (Livethrough0 > Livethrough1)
1224 return true;
1225 if (Livethrough0 < Livethrough1)
1226 return false;
1227
1228 // Tie-break rule: operand index.
1229 return I0 < I1;
1230 });
1231}
1232
void RegAllocFast::allocateInstruction(MachineInstr &MI) {
  // The basic algorithm here is:
  // 1. Mark registers of def operands as free
  // 2. Allocate registers to use operands and place reload instructions for
  //    registers displaced by the allocation.
  //
  // However we need to handle some corner cases:
  // - pre-assigned defs and uses need to be handled before the other def/use
  //   operands are processed to avoid the allocation heuristics clashing with
  //   the pre-assignment.
  // - The "free def operands" step has to come last instead of first for tied
  //   operands and early-clobbers.

  // Reset per-instruction state.
  UsedInInstr.clear();
  RegMasks.clear();
  BundleVirtRegsMap.clear();

  // Returns true if the operand tied to operand Idx is undef (i.e. the tied
  // pair does not actually carry a live value into this instruction).
  auto TiedOpIsUndef = [&](const MachineOperand &MO, unsigned Idx) {
    assert(MO.isTied());
    unsigned TiedIdx = MI.findTiedOperandIdx(Idx);
    const MachineOperand &TiedMO = MI.getOperand(TiedIdx);
    return TiedMO.isUndef();
  };
  // Scan for special cases; Apply pre-assigned register defs to state.
  bool HasPhysRegUse = false;
  bool HasRegMask = false;
  bool HasVRegDef = false;
  bool HasDef = false;
  bool HasEarlyClobber = false;
  bool NeedToAssignLiveThroughs = false;
  for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (Reg.isVirtual()) {
        if (!shouldAllocateRegister(Reg))
          continue;
        if (MO.isDef()) {
          HasDef = true;
          HasVRegDef = true;
          if (MO.isEarlyClobber()) {
            HasEarlyClobber = true;
            NeedToAssignLiveThroughs = true;
          }
          // Tied (non-undef) and non-undef subregister defs also read a
          // live-through value and need the careful assignment order.
          if ((MO.isTied() && !TiedOpIsUndef(MO, I)) ||
              (MO.getSubReg() != 0 && !MO.isUndef()))
            NeedToAssignLiveThroughs = true;
        }
      } else if (Reg.isPhysical()) {
        if (!MRI->isReserved(Reg)) {
          if (MO.isDef()) {
            HasDef = true;
            bool displacedAny = definePhysReg(MI, Reg);
            if (MO.isEarlyClobber())
              HasEarlyClobber = true;
            if (!displacedAny)
              MO.setIsDead(true);
          }
          if (MO.readsReg())
            HasPhysRegUse = true;
        }
      }
    } else if (MO.isRegMask()) {
      HasRegMask = true;
      RegMasks.push_back(MO.getRegMask());
    }
  }

  // Allocate virtreg defs.
  if (HasDef) {
    if (HasVRegDef) {
      // Note that Implicit MOs can get re-arranged by defineVirtReg(), so loop
      // multiple times to ensure no operand is missed.
      bool ReArrangedImplicitOps = true;

      // Special handling for early clobbers, tied operands or subregister defs:
      // Compared to "normal" defs these:
      // - Must not use a register that is pre-assigned for a use operand.
      // - In order to solve tricky inline assembly constraints we change the
      //   heuristic to figure out a good operand order before doing
      //   assignments.
      if (NeedToAssignLiveThroughs) {
        PhysRegUses.clear();

        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          findAndSortDefOperandIndexes(MI);
          for (uint16_t OpIdx : DefOperandIndexes) {
            MachineOperand &MO = MI.getOperand(OpIdx);
            LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
            unsigned Reg = MO.getReg();
            if (MO.isEarlyClobber() ||
                (MO.isTied() && !TiedOpIsUndef(MO, OpIdx)) ||
                (MO.getSubReg() && !MO.isUndef())) {
              ReArrangedImplicitOps = defineLiveThroughVirtReg(MI, OpIdx, Reg);
            } else {
              ReArrangedImplicitOps = defineVirtReg(MI, OpIdx, Reg);
            }
            if (ReArrangedImplicitOps) {
              // Implicit operands of MI were re-arranged,
              // re-compute DefOperandIndexes.
              break;
            }
          }
        }
      } else {
        // Assign virtual register defs.
        while (ReArrangedImplicitOps) {
          ReArrangedImplicitOps = false;
          for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
            MachineOperand &MO = MI.getOperand(I);
            if (!MO.isReg() || !MO.isDef())
              continue;
            Register Reg = MO.getReg();
            if (Reg.isVirtual()) {
              ReArrangedImplicitOps = defineVirtReg(MI, I, Reg);
              if (ReArrangedImplicitOps) {
                break;
              }
            }
          }
        }
      }
    }

    // Free registers occupied by defs.
    // Iterate operands in reverse order, so we see the implicit super register
    // defs first (we added them earlier in case of <def,read-undef>).
    for (signed I = MI.getNumOperands() - 1; I >= 0; --I) {
      MachineOperand &MO = MI.getOperand(I);
      if (!MO.isReg() || !MO.isDef())
        continue;

      Register Reg = MO.getReg();

      // subreg defs don't free the full register. We left the subreg number
      // around as a marker in setPhysReg() to recognize this case here.
      if (Reg.isPhysical() && MO.getSubReg() != 0) {
        MO.setSubReg(0);
        continue;
      }

      assert((!MO.isTied() || !isClobberedByRegMasks(MO.getReg())) &&
             "tied def assigned to clobbered register");

      // Do not free tied operands and early clobbers.
      if ((MO.isTied() && !TiedOpIsUndef(MO, I)) || MO.isEarlyClobber())
        continue;
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        // Filtered-out vregs were never assigned; nothing to free.
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical());
      if (MRI->isReserved(Reg))
        continue;
      freePhysReg(Reg);
      unmarkRegUsedInInstr(Reg);
    }
  }

  // Displace clobbered registers.
  if (HasRegMask) {
    assert(!RegMasks.empty() && "expected RegMask");
    // MRI bookkeeping.
    for (const auto *RM : RegMasks)
      MRI->addPhysRegsUsedFromRegMask(RM);

    // Displace clobbered registers.
    for (const LiveReg &LR : LiveVirtRegs) {
      MCPhysReg PhysReg = LR.PhysReg;
      if (PhysReg != 0 && isClobberedByRegMasks(PhysReg))
        displacePhysReg(MI, PhysReg);
    }
  }

  // Apply pre-assigned register uses to state.
  if (HasPhysRegUse) {
    for (MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.readsReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isPhysical())
        continue;
      if (MRI->isReserved(Reg))
        continue;
      bool displacedAny = usePhysReg(MI, Reg);
      if (!displacedAny)
        MO.setIsKill(true);
    }
  }

  // Allocate virtreg uses and insert reloads as necessary.
  // Implicit MOs can get moved/removed by useVirtReg(), so loop multiple
  // times to ensure no operand is missed.
  bool HasUndefUse = false;
  bool ReArrangedImplicitMOs = true;
  while (ReArrangedImplicitMOs) {
    ReArrangedImplicitMOs = false;
    for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
      MachineOperand &MO = MI.getOperand(I);
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      if (MO.isUndef()) {
        // Deferred to the dedicated undef-use pass below.
        HasUndefUse = true;
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      assert(!MO.isInternalRead() && "Bundles not supported");
      assert(MO.readsReg() && "reading use");
      ReArrangedImplicitMOs = useVirtReg(MI, I, Reg);
      if (ReArrangedImplicitMOs)
        break;
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assign
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.all_uses()) {
      Register Reg = MO.getReg();
      if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Free early clobbers.
  if (HasEarlyClobber) {
    for (MachineOperand &MO : llvm::reverse(MI.all_defs())) {
      if (!MO.isEarlyClobber())
        continue;
      assert(!MO.getSubReg() && "should be already handled in def processing");

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Reg.isVirtual()) {
        assert(!shouldAllocateRegister(Reg));
        continue;
      }
      assert(Reg.isPhysical() && "should have register assigned");

      // We sometimes get odd situations like:
      //    early-clobber %x0 = INSTRUCTION %x0
      // which is semantically questionable as the early-clobber should
      // apply before the use. But in practice we consider the use to
      // happen before the early clobber now. Don't free the early clobber
      // register in this case.
      if (MI.readsRegister(Reg, TRI))
        continue;

      freePhysReg(Reg);
    }
  }

  LLVM_DEBUG(dbgs() << "<< " << MI);
  if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getNumOperands() == 2) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
1508
1509void RegAllocFast::handleDebugValue(MachineInstr &MI) {
1510 // Ignore DBG_VALUEs that aren't based on virtual registers. These are
1511 // mostly constants and frame indices.
1512 assert(MI.isDebugValue() && "not a DBG_VALUE*");
1513 for (const auto &MO : MI.debug_operands()) {
1514 if (!MO.isReg())
1515 continue;
1516 Register Reg = MO.getReg();
1517 if (!Reg.isVirtual())
1518 continue;
1519 if (!shouldAllocateRegister(Reg))
1520 continue;
1521
1522 // Already spilled to a stackslot?
1523 int SS = StackSlotForVirtReg[Reg];
1524 if (SS != -1) {
1525 // Modify DBG_VALUE now that the value is in a spill slot.
1526 updateDbgValueForSpill(MI, SS, Reg);
1527 LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
1528 continue;
1529 }
1530
1531 // See if this virtual register has already been allocated to a physical
1532 // register or spilled to a stack slot.
1533 LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
1535 for (MachineOperand &Op : MI.getDebugOperandsForReg(Reg))
1536 DbgOps.push_back(&Op);
1537
1538 if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
1539 // Update every use of Reg within MI.
1540 for (auto &RegMO : DbgOps)
1541 setPhysReg(MI, *RegMO, LRI->PhysReg);
1542 } else {
1543 DanglingDbgValues[Reg].push_back(&MI);
1544 }
1545
1546 // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
1547 // that future spills of Reg will have DBG_VALUEs.
1548 LiveDbgValueMap[Reg].append(DbgOps.begin(), DbgOps.end());
1549 }
1550}
1551
1552void RegAllocFast::handleBundle(MachineInstr &MI) {
1553 MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
1554 ++BundledMI;
1555 while (BundledMI->isBundledWithPred()) {
1556 for (MachineOperand &MO : BundledMI->operands()) {
1557 if (!MO.isReg())
1558 continue;
1559
1560 Register Reg = MO.getReg();
1561 if (!Reg.isVirtual() || !shouldAllocateRegister(Reg))
1562 continue;
1563
1565 DI = BundleVirtRegsMap.find(Reg);
1566 assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
1567
1568 setPhysReg(MI, MO, DI->second);
1569 }
1570
1571 ++BundledMI;
1572 }
1573}
1574
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  // Start with all register units free; mark the block's live-outs as
  // pre-assigned so they are not handed out to other vregs.
  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  for (const auto &LiveReg : MBB.liveouts())
    setPhysRegState(LiveReg.PhysReg, regPreAssigned);

  Coalesced.clear();

  // Traverse block in reverse order allocating instructions one by one.
  for (MachineInstr &MI : reverse(MBB)) {
    LLVM_DEBUG(
      dbgs() << "\n>> " << MI << "Regs:";
      dumpState()
    );

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);

    // Once BUNDLE header is assigned registers, same assignments need to be
    // done for bundled MIs.
    if (MI.getOpcode() == TargetOpcode::BUNDLE) {
      handleBundle(MI);
    }
  }

  LLVM_DEBUG(
    dbgs() << "Begin Regs:";
    dumpState()
  );

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
  reloadAtBegin(MBB);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  // Any DBG_VALUE whose vreg never received a register (and was not spilled
  // in the meantime) can no longer describe a location; mark it undef.
  for (auto &UDBGPair : DanglingDbgValues) {
    for (MachineInstr *DbgValue : UDBGPair.second) {
      assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
      // Nothing to do if the vreg was spilled in the meantime.
      if (!DbgValue->hasDebugOperandForReg(UDBGPair.first))
        continue;
      LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
                        << '\n');
      DbgValue->setDebugValueUndef();
    }
  }
  DanglingDbgValues.clear();

  LLVM_DEBUG(MBB.dump());
}
1640
bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  // Cache target/function info used throughout the allocator.
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs(MF);
  RegClassInfo.runOnMachineFunction(MF);
  // Size the per-instruction register-unit sets to this target's universe.
  unsigned NumRegUnits = TRI->getNumRegUnits();
  UsedInInstr.clear();
  UsedInInstr.setUniverse(NumRegUnits);
  PhysRegUses.clear();
  PhysRegUses.setUniverse(NumRegUnits);

  // initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register references
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  if (ClearVirtRegs) {
    // All machine operands and other references to virtual registers have been
    // replaced. Remove the virtual registers.
    MRI->clearVirtRegs();
  }

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}
1679
1681 return new RegAllocFast();
1682}
1683
1685 bool ClearVirtRegs) {
1686 return new RegAllocFast(Ftor, ClearVirtRegs);
1687}
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file implements an indexed map.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
This file implements a map that provides insertion order iteration.
#define P(N)
if(VerifyEach)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
static bool dominates(MachineBasicBlock &MBB, MachineBasicBlock::const_iterator A, MachineBasicBlock::const_iterator B)
static bool isCoalescable(const MachineInstr &MI)
static cl::opt< bool > IgnoreMissingDefs("rafast-ignore-missing-defs", cl::Hidden)
static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the SparseSet class derived from the version described in Briggs,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class recording the (high level) value of a variable.
Iterator begin() const
Represent the analysis usage information of a pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:269
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
BitVector & set()
Definition: BitVector.h:351
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
void resize(typename StorageT::size_type s)
Definition: IndexedMap.h:60
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
static constexpr unsigned NoRegister
Definition: MCRegister.h:52
iterator_range< liveout_iterator > liveouts() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual MachineFunctionProperties getClearedProperties() const
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual MachineFunctionProperties getSetProperties() const
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const MachineBasicBlock & front() const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool isDebugValueList() const
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:326
bool isNonListDebugValue() const
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:562
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
void setIsRenamable(bool Val=true)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
ArrayRef< MCPhysReg > getOrder(const TargetRegisterClass *RC) const
getOrder - Returns the preferred allocation order for RC.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
SparseSet - Fast set implementation for objects that can be identified by small unsigned keys.
Definition: SparseSet.h:124
typename DenseT::iterator iterator
Definition: SparseSet.h:171
typename DenseT::const_iterator const_iterator
Definition: SparseSet.h:172
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Iterator for intrusive lists based on ilist_node.
self_iterator getIterator()
Definition: ilist_node.h:82
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Kill
The last use of a register.
@ SS
Definition: X86.h:208
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1685
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
static bool allocateAllRegClasses(const TargetRegisterInfo &, const TargetRegisterClass &)
Default register class filter function for register allocation.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1734
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:429
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1652
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
std::function< bool(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC)> RegClassFilterFunc
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Pair of physical register and lane mask.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:233