//===- InterferenceCache.cpp - Caching per-block interference -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
//===----------------------------------------------------------------------===//

#include "InterferenceCache.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemAlloc.h"
#include <cassert>
#include <cstdint>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

// Static member used for null interference cursors.
const InterferenceCache::BlockInterference
    InterferenceCache::Cursor::NoInterference;
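
// Typical use (see the Cursor class in InterferenceCache.h and its use in
// RegAllocGreedy.cpp): a client points a Cursor at a candidate physreg and
// then walks the blocks it cares about, asking for the first and last
// interfering slots in each block. A rough sketch of that query pattern,
// assuming the Cursor interface declared in InterferenceCache.h:
//
//   InterferenceCache::Cursor Intf;
//   Intf.setPhysReg(Cache, PhysReg); // Grab (or reuse) a cache entry.
//   Intf.moveToBlock(MBBNum);        // Compute BlockInterference lazily.
//   if (Intf.hasInterference()) {
//     SlotIndex First = Intf.first(); // First interfering slot in the block.
//     SlotIndex Last = Intf.last();   // Last interfering slot in the block.
//   }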

// Initializes PhysRegEntries (instead of a SmallVector, PhysRegEntries is a
// buffer of size NumPhysRegs to speed up alloc/clear for targets with large
// reg files). Calloced memory is used for good form, and quiets tools like
// Valgrind too, but zero initialized memory is not required by the algorithm:
// this is because PhysRegEntries works like a SparseSet and its entries are
// only valid when there is a corresponding CacheEntries assignment. There is
// also support for when pass managers are reused for targets with different
// numbers of PhysRegs: in this case PhysRegEntries is freed and reinitialized.
void InterferenceCache::reinitPhysRegEntries() {
  if (PhysRegEntriesCount == TRI->getNumRegs()) return;
  free(PhysRegEntries);
  PhysRegEntriesCount = TRI->getNumRegs();
  PhysRegEntries = static_cast<unsigned char*>(
      safe_calloc(PhysRegEntriesCount, sizeof(unsigned char)));
}

void InterferenceCache::init(MachineFunction *mf,
                             LiveIntervalUnion *liuarray,
                             SlotIndexes *indexes,
                             LiveIntervals *lis,
                             const TargetRegisterInfo *tri) {
  MF = mf;
  LIUArray = liuarray;
  TRI = tri;
  reinitPhysRegEntries();
  for (Entry &E : Entries)
    E.clear(mf, indexes, lis);
}

InterferenceCache::Entry *InterferenceCache::get(MCRegister PhysReg) {
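  // PhysRegEntries is never cleared, so the index loaded below may be stale
  // garbage. It is only trusted if it is in range and the entry it names
  // actually refers to PhysReg (SparseSet-style validation, see the comment
  // above reinitPhysRegEntries()).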
  unsigned char E = PhysRegEntries[PhysReg.id()];
  if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
    if (!Entries[E].valid(LIUArray, TRI))
      Entries[E].revalidate(LIUArray, TRI);
    return &Entries[E];
  }
  // No valid entry exists, pick the next round-robin entry.
  E = RoundRobin;
  if (++RoundRobin == CacheEntries)
    RoundRobin = 0;
  for (unsigned i = 0; i != CacheEntries; ++i) {
    // Skip entries that are in use.
    if (Entries[E].hasRefs()) {
      if (++E == CacheEntries)
        E = 0;
      continue;
    }
    Entries[E].reset(PhysReg, LIUArray, TRI, MF);
    PhysRegEntries[PhysReg] = E;
    return &Entries[E];
  }
  llvm_unreachable("Ran out of interference cache entries.");
}

/// revalidate - LIU contents have changed, update tags.
void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray,
                                          const TargetRegisterInfo *TRI) {
  // Invalidate all block entries.
  ++Tag;
  // Invalidate all iterators.
  PrevPos = SlotIndex();
  unsigned i = 0;
  for (MCRegUnit Unit : TRI->regunits(PhysReg))
    RegUnits[i++].VirtTag = LIUArray[Unit].getTag();
}

void InterferenceCache::Entry::reset(MCRegister physReg,
                                     LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI,
                                     const MachineFunction *MF) {
  assert(!hasRefs() && "Cannot reset cache entry with references");
  // LIU's changed, invalidate cache.
  ++Tag;
  PhysReg = physReg;
  Blocks.resize(MF->getNumBlockIDs());

  // Reset iterators.
  PrevPos = SlotIndex();
  RegUnits.clear();
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    RegUnits.push_back(LIUArray[Unit]);
    RegUnits.back().Fixed = &LIS->getRegUnit(Unit);
  }
}

bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI) {
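  // The entry is still valid only if it tracks exactly the register units of
  // PhysReg and none of the corresponding LiveIntervalUnions have changed
  // since their tags were recorded by reset() or revalidate().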
  unsigned i = 0, e = RegUnits.size();
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    if (i == e)
      return false;
    if (LIUArray[Unit].changedSince(RegUnits[i].VirtTag))
      return false;
    ++i;
  }
  return i == e;
}

void InterferenceCache::Entry::update(unsigned MBBNum) {
  SlotIndex Start, Stop;
  std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);

  // Use advanceTo only when possible.
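  // The per-unit iterators can only be advanced forward cheaply; when the
  // query moves backwards, or this is the first query, fall back to a full
  // find() from scratch.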
  if (PrevPos != Start) {
    if (!PrevPos.isValid() || Start < PrevPos) {
      for (RegUnitInfo &RUI : RegUnits) {
        RUI.VirtI.find(Start);
        RUI.FixedI = RUI.Fixed->find(Start);
      }
    } else {
      for (RegUnitInfo &RUI : RegUnits) {
        RUI.VirtI.advanceTo(Start);
        if (RUI.FixedI != RUI.Fixed->end())
          RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
      }
    }
    PrevPos = Start;
  }

  MachineFunction::const_iterator MFI =
      MF->getBlockNumbered(MBBNum)->getIterator();
  BlockInterference *BI = &Blocks[MBBNum];
  ArrayRef<SlotIndex> RegMaskSlots;
  ArrayRef<const uint32_t*> RegMaskBits;
  while (true) {
    BI->Tag = Tag;
    BI->First = BI->Last = SlotIndex();

    // Check for first interference from virtregs.
    for (RegUnitInfo &RUI : RegUnits) {
      LiveIntervalUnion::SegmentIter &I = RUI.VirtI;
      if (!I.valid())
        continue;
      SlotIndex StartI = I.start();
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Same thing for fixed interference.
    for (RegUnitInfo &RUI : RegUnits) {
      LiveInterval::const_iterator I = RUI.FixedI;
      LiveInterval::const_iterator E = RUI.Fixed->end();
      if (I == E)
        continue;
      SlotIndex StartI = I->start;
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Also check for register mask interference.
    RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
    RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
    SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
    for (unsigned i = 0, e = RegMaskSlots.size();
         i != e && RegMaskSlots[i] < Limit; ++i)
      if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
        // Register mask i clobbers PhysReg before the LIU interference.
        BI->First = RegMaskSlots[i];
        break;
      }

    PrevPos = Stop;
    if (BI->First.isValid())
      break;

    // No interference in this block? Go ahead and precompute the next block.
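    // The iterators are already positioned at or past this block, so filling
    // in the next block's BlockInterference now is nearly free and lets a
    // later query on that block hit the cache directly.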
    if (++MFI == MF->end())
      return;
    MBBNum = MFI->getNumber();
    BI = &Blocks[MBBNum];
    if (BI->Tag == Tag)
      return;
    std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
  }

  // Check for last interference in block.
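  // advanceTo(Stop) moves each iterator to the first segment ending at or
  // after Stop. If that segment starts at or after Stop (or the iterator ran
  // off the end), the last in-block interference is the previous segment, so
  // temporarily back up one step to read its stop, then restore the iterator
  // for subsequent queries.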
  for (RegUnitInfo &RUI : RegUnits) {
    LiveIntervalUnion::SegmentIter &I = RUI.VirtI;
    if (!I.valid() || I.start() >= Stop)
      continue;
    I.advanceTo(Stop);
    bool Backup = !I.valid() || I.start() >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I.stop();
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Fixed interference.
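  // Same backup dance as above, but over the fixed (preassigned) ranges using
  // plain LiveRange iterators instead of LiveIntervalUnion segments.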
  for (RegUnitInfo &RUI : RegUnits) {
    LiveInterval::iterator &I = RUI.FixedI;
    LiveRange *LR = RUI.Fixed;
    if (I == LR->end() || I->start >= Stop)
      continue;
    I = LR->advanceTo(I, Stop);
    bool Backup = I == LR->end() || I->start >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I->end;
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Also check for register mask interference.
  SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
  for (unsigned i = RegMaskSlots.size();
       i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
    if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
      // Register mask i-1 clobbers PhysReg after the LIU interference.
      // Model the regmask clobber as a dead def.
      BI->Last = RegMaskSlots[i-1].getDeadSlot();
      break;
    }
}