Bug Summary

File: llvm/include/llvm/CodeGen/RegAllocPBQP.h
Warning: line 73, column 7
Use of zero-allocated memory
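
For context: this diagnostic (from the analyzer's malloc-checker family) fires when memory obtained from a zero-sized allocation is later read or written. A minimal, self-contained C++ sketch of the flagged pattern, unrelated to the LLVM sources below:

    #include <cstdlib>

    int main() {
      // malloc(0) may legally return a non-null pointer, but the region has
      // no usable bytes, so any dereference is a use of zero-allocated memory.
      char *P = static_cast<char *>(std::malloc(0));
      if (P)
        P[0] = 'x'; // analyzer: use of zero-allocated memory
      std::free(P);
      return 0;
    }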

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name RegAllocPBQP.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/CodeGen/RegAllocPBQP.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/CodeGen/RegAllocPBQP.cpp

1//===- RegAllocPBQP.cpp ---- PBQP Register Allocator ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a Partitioned Boolean Quadratic Programming (PBQP) based
10// register allocator for LLVM. This allocator works by constructing a PBQP
11// problem representing the register allocation problem under consideration,
12// solving this using a PBQP solver, and mapping the solution back to a
13// register assignment. If any variables are selected for spilling then spill
14// code is inserted and the process repeated.
15//
16// The PBQP solver (pbqp.c) provided for this allocator uses a heuristic tuned
17// for register allocation. For more information on PBQP for register
18// allocation, see the following papers:
19//
20// (1) Hames, L. and Scholz, B. 2006. Nearly optimal register allocation with
21// PBQP. In Proceedings of the 7th Joint Modular Languages Conference
22// (JMLC'06). LNCS, vol. 4228. Springer, New York, NY, USA. 346-361.
23//
24// (2) Scholz, B., Eckstein, E. 2002. Register allocation for irregular
25// architectures. In Proceedings of the Joint Conference on Languages,
26// Compilers and Tools for Embedded Systems (LCTES'02), ACM Press, New York,
27// NY, USA, 139-148.
28//
29//===----------------------------------------------------------------------===//
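
The driver loop the comment above describes — build the PBQP problem, solve, map back, spill, repeat — can be pictured with this hypothetical, runnable stand-in (the identifiers are illustrative, not the real LLVM interfaces):

    #include <cstdio>

    // Hypothetical stand-in for the solve/spill cycle: each round "solves" and
    // reports whether spilling introduced new variables needing another round.
    int main() {
      int PendingSpills = 3; // pretend three rounds of spilling are needed
      unsigned Round = 0;
      bool AnotherRoundNeeded = true;
      while (AnotherRoundNeeded) {
        AnotherRoundNeeded = --PendingSpills > 0; // solve + map back
        std::printf("PBQP round %u, more spills: %d\n", Round++,
                    AnotherRoundNeeded);
      }
      return 0;
    }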
30
31#include "llvm/CodeGen/RegAllocPBQP.h"
32#include "RegisterCoalescer.h"
33#include "llvm/ADT/ArrayRef.h"
34#include "llvm/ADT/BitVector.h"
35#include "llvm/ADT/DenseMap.h"
36#include "llvm/ADT/DenseSet.h"
37#include "llvm/ADT/STLExtras.h"
38#include "llvm/ADT/SmallPtrSet.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/ADT/StringRef.h"
41#include "llvm/Analysis/AliasAnalysis.h"
42#include "llvm/CodeGen/CalcSpillWeights.h"
43#include "llvm/CodeGen/LiveInterval.h"
44#include "llvm/CodeGen/LiveIntervals.h"
45#include "llvm/CodeGen/LiveRangeEdit.h"
46#include "llvm/CodeGen/LiveStacks.h"
47#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
48#include "llvm/CodeGen/MachineDominators.h"
49#include "llvm/CodeGen/MachineFunction.h"
50#include "llvm/CodeGen/MachineFunctionPass.h"
51#include "llvm/CodeGen/MachineInstr.h"
52#include "llvm/CodeGen/MachineLoopInfo.h"
53#include "llvm/CodeGen/MachineRegisterInfo.h"
54#include "llvm/CodeGen/PBQP/Graph.h"
55#include "llvm/CodeGen/PBQP/Math.h"
56#include "llvm/CodeGen/PBQP/Solution.h"
57#include "llvm/CodeGen/PBQPRAConstraint.h"
58#include "llvm/CodeGen/RegAllocRegistry.h"
59#include "llvm/CodeGen/SlotIndexes.h"
60#include "llvm/CodeGen/Spiller.h"
61#include "llvm/CodeGen/TargetRegisterInfo.h"
62#include "llvm/CodeGen/TargetSubtargetInfo.h"
63#include "llvm/CodeGen/VirtRegMap.h"
64#include "llvm/Config/llvm-config.h"
65#include "llvm/IR/Function.h"
66#include "llvm/IR/Module.h"
67#include "llvm/MC/MCRegisterInfo.h"
68#include "llvm/Pass.h"
69#include "llvm/Support/CommandLine.h"
70#include "llvm/Support/Compiler.h"
71#include "llvm/Support/Debug.h"
72#include "llvm/Support/FileSystem.h"
73#include "llvm/Support/Printable.h"
74#include "llvm/Support/raw_ostream.h"
75#include <algorithm>
76#include <cassert>
77#include <cstddef>
78#include <limits>
79#include <map>
80#include <memory>
81#include <queue>
82#include <set>
83#include <sstream>
84#include <string>
85#include <system_error>
86#include <tuple>
87#include <utility>
88#include <vector>
89
90using namespace llvm;
91
92#define DEBUG_TYPE "regalloc"
93
94static RegisterRegAlloc
95RegisterPBQPRepAlloc("pbqp", "PBQP register allocator",
96 createDefaultPBQPRegisterAllocator);
97
98static cl::opt<bool>
99PBQPCoalescing("pbqp-coalescing",
100 cl::desc("Attempt coalescing during PBQP register allocation."),
101 cl::init(false), cl::Hidden);
102
103#ifndef NDEBUG
104static cl::opt<bool>
105PBQPDumpGraphs("pbqp-dump-graphs",
106 cl::desc("Dump graphs for each function/round in the compilation unit."),
107 cl::init(false), cl::Hidden);
108#endif
109
110namespace {
111
112///
113/// PBQP based allocators solve the register allocation problem by mapping
114/// register allocation problems to Partitioned Boolean Quadratic
115/// Programming problems.
116class RegAllocPBQP : public MachineFunctionPass {
117public:
118 static char ID;
119
120 /// Construct a PBQP register allocator.
121 RegAllocPBQP(char *cPassID = nullptr)
122 : MachineFunctionPass(ID), customPassID(cPassID) {
123 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
124 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
125 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
126 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
127 }
128
129 /// Return the pass name.
130 StringRef getPassName() const override { return "PBQP Register Allocator"; }
131
132 /// PBQP analysis usage.
133 void getAnalysisUsage(AnalysisUsage &au) const override;
134
135 /// Perform register allocation
136 bool runOnMachineFunction(MachineFunction &MF) override;
137
138 MachineFunctionProperties getRequiredProperties() const override {
139 return MachineFunctionProperties().set(
140 MachineFunctionProperties::Property::NoPHIs);
141 }
142
143 MachineFunctionProperties getClearedProperties() const override {
144 return MachineFunctionProperties().set(
145 MachineFunctionProperties::Property::IsSSA);
146 }
147
148private:
149 using RegSet = std::set<Register>;
150
151 char *customPassID;
152
153 RegSet VRegsToAlloc, EmptyIntervalVRegs;
154
155 /// Insts which are defs of an original reg and whose defs are already all
156 /// dead after remat are saved in DeadRemats. The deletion of such insts is
157 /// postponed till all the allocations are done, so their remat exprs are
158 /// always available for the remat of all the siblings of the original reg.
159 SmallPtrSet<MachineInstr *, 32> DeadRemats;
160
161 /// Finds the initial set of vreg intervals to allocate.
162 void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);
163
164 /// Constructs an initial graph.
165 void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
166
167 /// Spill the given VReg.
168 void spillVReg(Register VReg, SmallVectorImpl<Register> &NewIntervals,
169 MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
170 Spiller &VRegSpiller);
171
172 /// Given a solved PBQP problem, maps the solution back to a register
173 /// assignment.
174 bool mapPBQPToRegAlloc(const PBQPRAGraph &G,
175 const PBQP::Solution &Solution,
176 VirtRegMap &VRM,
177 Spiller &VRegSpiller);
178
179 /// Postprocessing before final spilling. Sets basic block "live in"
180 /// variables.
181 void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,
182 VirtRegMap &VRM) const;
183
184 void postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS);
185};
186
187char RegAllocPBQP::ID = 0;
188
189/// Set spill costs for each node in the PBQP reg-alloc graph.
190class SpillCosts : public PBQPRAConstraint {
191public:
192 void apply(PBQPRAGraph &G) override {
193 LiveIntervals &LIS = G.getMetadata().LIS;
194
195 // A minimum spill cost, so that register constraints can be set
196 // without normalization in the [0.0, MinSpillCost) interval.
197 const PBQP::PBQPNum MinSpillCost = 10.0;
198
199 for (auto NId : G.nodeIds()) {
200 PBQP::PBQPNum SpillCost =
201 LIS.getInterval(G.getNodeMetadata(NId).getVReg()).weight();
202 if (SpillCost == 0.0)
203 SpillCost = std::numeric_limits<PBQP::PBQPNum>::min();
204 else
205 SpillCost += MinSpillCost;
206 PBQPRAGraph::RawVector NodeCosts(G.getNodeCosts(NId));
207 NodeCosts[PBQP::RegAlloc::getSpillOptionIdx()] = SpillCost;
208 G.setNodeCosts(NId, std::move(NodeCosts));
209 }
210 }
211};
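
To make the cost-vector convention concrete, a toy sketch assuming the layout used above (index 0 is the spill option, indices 1..N the allowed physregs; the numbers are made up):

    #include <cstdio>
    #include <limits>
    #include <vector>

    int main() {
      using PBQPNum = float;
      // Node cost vector: index 0 = spill option, indices 1..N = allowed pregs.
      std::vector<PBQPNum> NodeCosts = {0.0f, 1.0f, 0.0f, 0.0f};
      const PBQPNum MinSpillCost = 10.0f;
      PBQPNum SpillWeight = 0.0f; // e.g. a rematerializable interval
      // Zero-weight intervals get the smallest positive cost, so the solver
      // still prefers any register over spilling them.
      NodeCosts[0] = (SpillWeight == 0.0f)
                         ? std::numeric_limits<PBQPNum>::min()
                         : SpillWeight + MinSpillCost;
      std::printf("spill option cost: %g\n", NodeCosts[0]);
      return 0;
    }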
212
213/// Add interference edges between overlapping vregs.
214class Interference : public PBQPRAConstraint {
215private:
216 using AllowedRegVecPtr = const PBQP::RegAlloc::AllowedRegVector *;
217 using IKey = std::pair<AllowedRegVecPtr, AllowedRegVecPtr>;
218 using IMatrixCache = DenseMap<IKey, PBQPRAGraph::MatrixPtr>;
219 using DisjointAllowedRegsCache = DenseSet<IKey>;
220 using IEdgeKey = std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId>;
221 using IEdgeCache = DenseSet<IEdgeKey>;
222
223 bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
224 PBQPRAGraph::NodeId MId,
225 const DisjointAllowedRegsCache &D) const {
226 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
227 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
228
229 if (NRegs == MRegs)
230 return false;
231
232 if (NRegs < MRegs)
233 return D.contains(IKey(NRegs, MRegs));
234
235 return D.contains(IKey(MRegs, NRegs));
236 }
237
238 void setDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
239 PBQPRAGraph::NodeId MId,
240 DisjointAllowedRegsCache &D) {
241 const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
242 const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
243
244 assert(NRegs != MRegs && "AllowedRegs can not be disjoint with itself");
245
246 if (NRegs < MRegs)
247 D.insert(IKey(NRegs, MRegs));
248 else
249 D.insert(IKey(MRegs, NRegs));
250 }
251
252 // Holds (Interval, CurrentSegmentID, and NodeId). The first two are required
253 // for the fast interference graph construction algorithm. The last is there
254 // to save us from looking up node ids via the VRegToNode map in the graph
255 // metadata.
256 using IntervalInfo =
257 std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>;
258
259 static SlotIndex getStartPoint(const IntervalInfo &I) {
260 return std::get<0>(I)->segments[std::get<1>(I)].start;
261 }
262
263 static SlotIndex getEndPoint(const IntervalInfo &I) {
264 return std::get<0>(I)->segments[std::get<1>(I)].end;
265 }
266
267 static PBQP::GraphBase::NodeId getNodeId(const IntervalInfo &I) {
268 return std::get<2>(I);
269 }
270
271 static bool lowestStartPoint(const IntervalInfo &I1,
272 const IntervalInfo &I2) {
273 // Condition reversed because priority queue has the *highest* element at
274 // the front, rather than the lowest.
275 return getStartPoint(I1) > getStartPoint(I2);
276 }
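
A self-contained illustration of the reversed comparison: std::priority_queue keeps the greatest element (per its comparator) at top(), so comparing with > turns it into a min-queue, which is exactly what lowestStartPoint does for start points:

    #include <cstdio>
    #include <queue>
    #include <vector>

    // With comparator "A > B", the "greatest" element under that ordering is
    // the numeric minimum, so top() yields the lowest value first.
    static bool ReversedLess(int A, int B) { return A > B; }

    int main() {
      std::priority_queue<int, std::vector<int>, decltype(&ReversedLess)> Q(
          &ReversedLess);
      for (int V : {30, 10, 20})
        Q.push(V);
      while (!Q.empty()) {
        std::printf("%d ", Q.top()); // prints 10 20 30: lowest first
        Q.pop();
      }
      std::printf("\n");
      return 0;
    }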
277
278 static bool lowestEndPoint(const IntervalInfo &I1,
279 const IntervalInfo &I2) {
280 SlotIndex E1 = getEndPoint(I1);
281 SlotIndex E2 = getEndPoint(I2);
282
283 if (E1 < E2)
284 return true;
285
286 if (E1 > E2)
287 return false;
288
289 // If two intervals end at the same point, we need a way to break the tie or
290 // the set will assume they're actually equal and refuse to insert a
291 // "duplicate". Just compare the vregs - fast and guaranteed unique.
292 return std::get<0>(I1)->reg() < std::get<0>(I2)->reg();
293 }
294
295 static bool isAtLastSegment(const IntervalInfo &I) {
296 return std::get<1>(I) == std::get<0>(I)->size() - 1;
297 }
298
299 static IntervalInfo nextSegment(const IntervalInfo &I) {
300 return std::make_tuple(std::get<0>(I), std::get<1>(I) + 1, std::get<2>(I));
301 }
302
303public:
304 void apply(PBQPRAGraph &G) override {
305 // The following is loosely based on the linear scan algorithm introduced in
306 // "Linear Scan Register Allocation" by Poletto and Sarkar. This version
307 // isn't linear, because the size of the active set isn't bounded by the
308 // number of registers, but rather the size of the largest clique in the
309 // graph. Still, we expect this to be better than N^2.
310 LiveIntervals &LIS = G.getMetadata().LIS;
311
312 // Interference matrices are incredibly regular - they're only a function of
313 // the allowed sets, so we cache them to avoid the overhead of constructing
314 // and uniquing them.
315 IMatrixCache C;
316
317 // Finding an edge is expensive in the worst case (O(max_clique(G))), so
318 // we locally cache edges we have already seen.
319 IEdgeCache EC;
320
321 // Cache known-disjoint allowed register pairs.
322 DisjointAllowedRegsCache D;
323
324 using IntervalSet = std::set<IntervalInfo, decltype(&lowestEndPoint)>;
325 using IntervalQueue =
326 std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
327 decltype(&lowestStartPoint)>;
328 IntervalSet Active(lowestEndPoint);
329 IntervalQueue Inactive(lowestStartPoint);
330
331 // Start by building the inactive set.
332 for (auto NId : G.nodeIds()) {
333 Register VReg = G.getNodeMetadata(NId).getVReg();
334 LiveInterval &LI = LIS.getInterval(VReg);
335 assert(!LI.empty() && "PBQP graph contains node for empty interval");
336 Inactive.push(std::make_tuple(&LI, 0, NId));
337 }
338
339 while (!Inactive.empty()) {
340 // Tentatively grab the "next" interval - this choice may be overridden
341 // below.
342 IntervalInfo Cur = Inactive.top();
343
344 // Retire any active intervals that end before Cur starts.
345 IntervalSet::iterator RetireItr = Active.begin();
346 while (RetireItr != Active.end() &&
347 (getEndPoint(*RetireItr) <= getStartPoint(Cur))) {
348 // If this interval has subsequent segments, add the next one to the
349 // inactive list.
350 if (!isAtLastSegment(*RetireItr))
351 Inactive.push(nextSegment(*RetireItr));
352
353 ++RetireItr;
354 }
355 Active.erase(Active.begin(), RetireItr);
356
357 // One of the newly retired segments may actually start before the
358 // Cur segment, so re-grab the front of the inactive list.
359 Cur = Inactive.top();
360 Inactive.pop();
361
362 // At this point we know that Cur overlaps all active intervals. Add the
363 // interference edges.
364 PBQP::GraphBase::NodeId NId = getNodeId(Cur);
365 for (const auto &A : Active) {
366 PBQP::GraphBase::NodeId MId = getNodeId(A);
367
368 // Do not add an edge when the nodes' allowed registers do not
369 // intersect: there is obviously no interference.
370 if (haveDisjointAllowedRegs(G, NId, MId, D))
371 continue;
372
373 // Check that we haven't already added this edge
374 IEdgeKey EK(std::min(NId, MId), std::max(NId, MId));
375 if (EC.count(EK))
376 continue;
377
378 // This is a new edge - add it to the graph.
379 if (!createInterferenceEdge(G, NId, MId, C))
380 setDisjointAllowedRegs(G, NId, MId, D);
381 else
382 EC.insert(EK);
383 }
384
385 // Finally, add Cur to the Active set.
386 Active.insert(Cur);
387 }
388 }
389
390private:
391 // Create an Interference edge and add it to the graph, unless it is
392 // a null matrix, meaning the nodes' allowed registers do not have any
393 // interference. This case occurs frequently between integer and floating
394 // point registers for example.
395 // Returns true iff the nodes interfere.
396 bool createInterferenceEdge(PBQPRAGraph &G,
397 PBQPRAGraph::NodeId NId, PBQPRAGraph::NodeId MId,
398 IMatrixCache &C) {
399 const TargetRegisterInfo &TRI =
400 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
401 const auto &NRegs = G.getNodeMetadata(NId).getAllowedRegs();
402 const auto &MRegs = G.getNodeMetadata(MId).getAllowedRegs();
403
404 // Try looking the edge costs up in the IMatrixCache first.
405 IKey K(&NRegs, &MRegs);
406 IMatrixCache::iterator I = C.find(K);
407 if (I != C.end()) {
408 G.addEdgeBypassingCostAllocator(NId, MId, I->second);
409 return true;
410 }
411
412 PBQPRAGraph::RawMatrix M(NRegs.size() + 1, MRegs.size() + 1, 0);
413 bool NodesInterfere = false;
414 for (unsigned I = 0; I != NRegs.size(); ++I) {
415 MCRegister PRegN = NRegs[I];
416 for (unsigned J = 0; J != MRegs.size(); ++J) {
417 MCRegister PRegM = MRegs[J];
418 if (TRI.regsOverlap(PRegN, PRegM)) {
419 M[I + 1][J + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
420 NodesInterfere = true;
421 }
422 }
423 }
424
425 if (!NodesInterfere)
426 return false;
427
428 PBQPRAGraph::EdgeId EId = G.addEdge(NId, MId, std::move(M));
429 C[K] = G.getEdgeCostsPtr(EId);
430
431 return true;
432 }
433};
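
A hedged toy run of createInterferenceEdge's inner loops: with hypothetical allowed sets {R0, R1} and {R1, R2}, where only identical registers overlap, only the (R1, R1) cell becomes infinite (row/column 0 are the spill options):

    #include <cstdio>
    #include <limits>

    int main() {
      // Hypothetical allowed sets; in the real code these are MCRegisters and
      // overlap is decided by TargetRegisterInfo::regsOverlap.
      const int NRegs[] = {0, 1}; // "R0", "R1"
      const int MRegs[] = {1, 2}; // "R1", "R2"
      const float Inf = std::numeric_limits<float>::infinity();
      float M[3][3] = {}; // (|NRegs|+1) x (|MRegs|+1), row/col 0 = spill option
      for (int I = 0; I != 2; ++I)
        for (int J = 0; J != 2; ++J)
          if (NRegs[I] == MRegs[J]) // stand-in for regsOverlap
            M[I + 1][J + 1] = Inf;
      for (auto &Row : M) {
        for (float C : Row)
          std::printf("%4g ", C);
        std::printf("\n");
      }
      return 0;
    }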
434
435class Coalescing : public PBQPRAConstraint {
436public:
437 void apply(PBQPRAGraph &G) override {
438 MachineFunction &MF = G.getMetadata().MF;
439 MachineBlockFrequencyInfo &MBFI = G.getMetadata().MBFI;
440 CoalescerPair CP(*MF.getSubtarget().getRegisterInfo());
441
442 // Scan the machine function and add a coalescing cost whenever
443 // CoalescerPair gives the OK.
444 for (const auto &MBB : MF) {
445 for (const auto &MI : MBB) {
446 // Skip non-coalescable or already-coalesced copies.
447 if (!CP.setRegisters(&MI) || CP.getSrcReg() == CP.getDstReg())
1
Assuming the condition is false
2
Taking false branch
448 continue;
449
450 Register DstReg = CP.getDstReg();
451 Register SrcReg = CP.getSrcReg();
452
453 PBQP::PBQPNum CBenefit = MBFI.getBlockFreqRelativeToEntryBlock(&MBB);
454
455 if (CP.isPhys()) {
3
Taking false branch
456 if (!MF.getRegInfo().isAllocatable(DstReg))
457 continue;
458
459 PBQPRAGraph::NodeId NId = G.getMetadata().getNodeIdForVReg(SrcReg);
460
461 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed =
462 G.getNodeMetadata(NId).getAllowedRegs();
463
464 unsigned PRegOpt = 0;
465 while (PRegOpt < Allowed.size() && Allowed[PRegOpt].id() != DstReg)
466 ++PRegOpt;
467
468 if (PRegOpt < Allowed.size()) {
469 PBQPRAGraph::RawVector NewCosts(G.getNodeCosts(NId));
470 NewCosts[PRegOpt + 1] -= CBenefit;
471 G.setNodeCosts(NId, std::move(NewCosts));
472 }
473 } else {
474 PBQPRAGraph::NodeId N1Id = G.getMetadata().getNodeIdForVReg(DstReg);
475 PBQPRAGraph::NodeId N2Id = G.getMetadata().getNodeIdForVReg(SrcReg);
476 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed1 =
477 &G.getNodeMetadata(N1Id).getAllowedRegs();
478 const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed2 =
479 &G.getNodeMetadata(N2Id).getAllowedRegs();
480
481 PBQPRAGraph::EdgeId EId = G.findEdge(N1Id, N2Id);
482 if (EId == G.invalidEdgeId()) {
4
Assuming the condition is true
5
Taking true branch
483 PBQPRAGraph::RawMatrix Costs(Allowed1->size() + 1,
484 Allowed2->size() + 1, 0);
485 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
486 G.addEdge(N1Id, N2Id, std::move(Costs));
6
Calling 'Graph::addEdge'
487 } else {
488 if (G.getEdgeNode1Id(EId) == N2Id) {
489 std::swap(N1Id, N2Id);
490 std::swap(Allowed1, Allowed2);
491 }
492 PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
493 addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
494 G.updateEdgeCosts(EId, std::move(Costs));
495 }
496 }
497 }
498 }
499 }
500
501private:
502 void addVirtRegCoalesce(
503 PBQPRAGraph::RawMatrix &CostMat,
504 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed1,
505 const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed2,
506 PBQP::PBQPNum Benefit) {
507 assert(CostMat.getRows() == Allowed1.size() + 1 && "Size mismatch.");
508 assert(CostMat.getCols() == Allowed2.size() + 1 && "Size mismatch.");
509 for (unsigned I = 0; I != Allowed1.size(); ++I) {
510 MCRegister PReg1 = Allowed1[I];
511 for (unsigned J = 0; J != Allowed2.size(); ++J) {
512 MCRegister PReg2 = Allowed2[J];
513 if (PReg1 == PReg2)
514 CostMat[I + 1][J + 1] -= Benefit;
515 }
516 }
517 }
518};
519
520/// PBQP-specific implementation of weight normalization.
521class PBQPVirtRegAuxInfo final : public VirtRegAuxInfo {
522 float normalize(float UseDefFreq, unsigned Size, unsigned NumInstr) override {
523 // All intervals have a spill weight that is mostly proportional to the
524 // number of uses, with uses in loops having a bigger weight.
525 return NumInstr * VirtRegAuxInfo::normalize(UseDefFreq, Size, 1);
526 }
527
528public:
529 PBQPVirtRegAuxInfo(MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
530 const MachineLoopInfo &Loops,
531 const MachineBlockFrequencyInfo &MBFI)
532 : VirtRegAuxInfo(MF, LIS, VRM, Loops, MBFI) {}
533};
534} // end anonymous namespace
535
536// Out-of-line destructor/anchor for PBQPRAConstraint.
537PBQPRAConstraint::~PBQPRAConstraint() = default;
538
539void PBQPRAConstraint::anchor() {}
540
541void PBQPRAConstraintList::anchor() {}
542
543void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
544 au.setPreservesCFG();
545 au.addRequired<AAResultsWrapperPass>();
546 au.addPreserved<AAResultsWrapperPass>();
547 au.addRequired<SlotIndexes>();
548 au.addPreserved<SlotIndexes>();
549 au.addRequired<LiveIntervals>();
550 au.addPreserved<LiveIntervals>();
551 //au.addRequiredID(SplitCriticalEdgesID);
552 if (customPassID)
553 au.addRequiredID(*customPassID);
554 au.addRequired<LiveStacks>();
555 au.addPreserved<LiveStacks>();
556 au.addRequired<MachineBlockFrequencyInfo>();
557 au.addPreserved<MachineBlockFrequencyInfo>();
558 au.addRequired<MachineLoopInfo>();
559 au.addPreserved<MachineLoopInfo>();
560 au.addRequired<MachineDominatorTree>();
561 au.addPreserved<MachineDominatorTree>();
562 au.addRequired<VirtRegMap>();
563 au.addPreserved<VirtRegMap>();
564 MachineFunctionPass::getAnalysisUsage(au);
565}
566
567void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
568 LiveIntervals &LIS) {
569 const MachineRegisterInfo &MRI = MF.getRegInfo();
570
571 // Iterate over all live ranges.
572 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
573 Register Reg = Register::index2VirtReg(I);
574 if (MRI.reg_nodbg_empty(Reg))
575 continue;
576 VRegsToAlloc.insert(Reg);
577 }
578}
579
580static bool isACalleeSavedRegister(MCRegister Reg,
581 const TargetRegisterInfo &TRI,
582 const MachineFunction &MF) {
583 const MCPhysReg *CSR = MF.getRegInfo().getCalleeSavedRegs();
584 for (unsigned i = 0; CSR[i] != 0; ++i)
585 if (TRI.regsOverlap(Reg, CSR[i]))
586 return true;
587 return false;
588}
589
590void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
591 Spiller &VRegSpiller) {
592 MachineFunction &MF = G.getMetadata().MF;
593
594 LiveIntervals &LIS = G.getMetadata().LIS;
595 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
596 const TargetRegisterInfo &TRI =
597 *G.getMetadata().MF.getSubtarget().getRegisterInfo();
598
599 std::vector<Register> Worklist(VRegsToAlloc.begin(), VRegsToAlloc.end());
600
601 std::map<Register, std::vector<MCRegister>> VRegAllowedMap;
602
603 while (!Worklist.empty()) {
604 Register VReg = Worklist.back();
605 Worklist.pop_back();
606
607 LiveInterval &VRegLI = LIS.getInterval(VReg);
608
609 // If this is an empty interval move it to the EmptyIntervalVRegs set then
610 // continue.
611 if (VRegLI.empty()) {
612 EmptyIntervalVRegs.insert(VRegLI.reg());
613 VRegsToAlloc.erase(VRegLI.reg());
614 continue;
615 }
616
617 const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
618
619 // Record any overlaps with regmask operands.
620 BitVector RegMaskOverlaps;
621 LIS.checkRegMaskInterference(VRegLI, RegMaskOverlaps);
622
623 // Compute an initial allowed set for the current vreg.
624 std::vector<MCRegister> VRegAllowed;
625 ArrayRef<MCPhysReg> RawPRegOrder = TRC->getRawAllocationOrder(MF);
626 for (unsigned I = 0; I != RawPRegOrder.size(); ++I) {
627 MCRegister PReg(RawPRegOrder[I]);
628 if (MRI.isReserved(PReg))
629 continue;
630
631 // vregLI crosses a regmask operand that clobbers preg.
632 if (!RegMaskOverlaps.empty() && !RegMaskOverlaps.test(PReg))
633 continue;
634
635 // vregLI overlaps fixed regunit interference.
636 bool Interference = false;
637 for (MCRegUnitIterator Units(PReg, &TRI); Units.isValid(); ++Units) {
638 if (VRegLI.overlaps(LIS.getRegUnit(*Units))) {
639 Interference = true;
640 break;
641 }
642 }
643 if (Interference)
644 continue;
645
646 // preg is usable for this virtual register.
647 VRegAllowed.push_back(PReg);
648 }
649
650 // Check for vregs that have no allowed registers. These should be
651 // pre-spilled and the new vregs added to the worklist.
652 if (VRegAllowed.empty()) {
653 SmallVector<Register, 8> NewVRegs;
654 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
655 llvm::append_range(Worklist, NewVRegs);
656 continue;
657 }
658
659 VRegAllowedMap[VReg.id()] = std::move(VRegAllowed);
660 }
661
662 for (auto &KV : VRegAllowedMap) {
663 auto VReg = KV.first;
664
665 // Move empty intervals to the EmptyIntervalVReg set.
666 if (LIS.getInterval(VReg).empty()) {
667 EmptyIntervalVRegs.insert(VReg);
668 VRegsToAlloc.erase(VReg);
669 continue;
670 }
671
672 auto &VRegAllowed = KV.second;
673
674 PBQPRAGraph::RawVector NodeCosts(VRegAllowed.size() + 1, 0);
675
676 // Tweak the cost of callee-saved registers, as using them forces spilling
677 // and restoring them. This would only happen in the prologue / epilogue though.
678 for (unsigned i = 0; i != VRegAllowed.size(); ++i)
679 if (isACalleeSavedRegister(VRegAllowed[i], TRI, MF))
680 NodeCosts[1 + i] += 1.0;
681
682 PBQPRAGraph::NodeId NId = G.addNode(std::move(NodeCosts));
683 G.getNodeMetadata(NId).setVReg(VReg);
684 G.getNodeMetadata(NId).setAllowedRegs(
685 G.getMetadata().getAllowedRegs(std::move(VRegAllowed)));
686 G.getMetadata().setNodeIdForVReg(VReg, NId);
687 }
688}
689
690void RegAllocPBQP::spillVReg(Register VReg,
691 SmallVectorImpl<Register> &NewIntervals,
692 MachineFunction &MF, LiveIntervals &LIS,
693 VirtRegMap &VRM, Spiller &VRegSpiller) {
694 VRegsToAlloc.erase(VReg);
695 LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,
696 nullptr, &DeadRemats);
697 VRegSpiller.spill(LRE);
698
699 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
700 (void)TRI;
701 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
702 << LRE.getParent().weight() << ", New vregs: ");
703
704 // Copy any newly inserted live intervals into the list of regs to
705 // allocate.
706 for (const Register &R : LRE) {
707 const LiveInterval &LI = LIS.getInterval(R);
708 assert(!LI.empty() && "Empty spill range.");
709 LLVM_DEBUG(dbgs() << printReg(LI.reg(), &TRI) << " ");
710 VRegsToAlloc.insert(LI.reg());
711 }
712
713 LLVM_DEBUG(dbgs() << ")\n");
714}
715
716bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
717 const PBQP::Solution &Solution,
718 VirtRegMap &VRM,
719 Spiller &VRegSpiller) {
720 MachineFunction &MF = G.getMetadata().MF;
721 LiveIntervals &LIS = G.getMetadata().LIS;
722 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
723 (void)TRI;
724
725 // Set to true if we have any spills
726 bool AnotherRoundNeeded = false;
727
728 // Clear the existing allocation.
729 VRM.clearAllVirt();
730
731 // Iterate over the nodes mapping the PBQP solution to a register
732 // assignment.
733 for (auto NId : G.nodeIds()) {
734 Register VReg = G.getNodeMetadata(NId).getVReg();
735 unsigned AllocOpt = Solution.getSelection(NId);
736
737 if (AllocOpt != PBQP::RegAlloc::getSpillOptionIdx()) {
738 MCRegister PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOpt - 1];
739 LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
740 << TRI.getName(PReg) << "\n");
741 assert(PReg != 0 && "Invalid preg selected.");
742 VRM.assignVirt2Phys(VReg, PReg);
743 } else {
744 // Spill VReg. If this introduces new intervals we'll need another round
745 // of allocation.
746 SmallVector<Register, 8> NewVRegs;
747 spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
748 AnotherRoundNeeded |= !NewVRegs.empty();
749 }
750 }
751
752 return !AnotherRoundNeeded;
753}
754
755void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
756 LiveIntervals &LIS,
757 VirtRegMap &VRM) const {
758 MachineRegisterInfo &MRI = MF.getRegInfo();
759
760 // First allocate registers for the empty intervals.
761 for (const Register &R : EmptyIntervalVRegs) {
762 LiveInterval &LI = LIS.getInterval(R);
763
764 Register PReg = MRI.getSimpleHint(LI.reg());
765
766 if (PReg == 0) {
767 const TargetRegisterClass &RC = *MRI.getRegClass(LI.reg());
768 const ArrayRef<MCPhysReg> RawPRegOrder = RC.getRawAllocationOrder(MF);
769 for (MCRegister CandidateReg : RawPRegOrder) {
770 if (!VRM.getRegInfo().isReserved(CandidateReg)) {
771 PReg = CandidateReg;
772 break;
773 }
774 }
775 assert(PReg &&
776 "No un-reserved physical registers in this register class");
777 }
778
779 VRM.assignVirt2Phys(LI.reg(), PReg);
780 }
781}
782
783void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {
784 VRegSpiller.postOptimization();
785 /// Remove dead defs because of rematerialization.
786 for (auto DeadInst : DeadRemats) {
787 LIS.RemoveMachineInstrFromMaps(*DeadInst);
788 DeadInst->eraseFromParent();
789 }
790 DeadRemats.clear();
791}
792
793bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
794 LiveIntervals &LIS = getAnalysis<LiveIntervals>();
795 MachineBlockFrequencyInfo &MBFI =
796 getAnalysis<MachineBlockFrequencyInfo>();
797
798 VirtRegMap &VRM = getAnalysis<VirtRegMap>();
799
800 PBQPVirtRegAuxInfo VRAI(MF, LIS, VRM, getAnalysis<MachineLoopInfo>(), MBFI);
801 VRAI.calculateSpillWeightsAndHints();
802
803 // FIXME: we create DefaultVRAI here to match existing behavior pre-passing
804 // the VRAI through the spiller to the live range editor. However, it probably
805 // makes more sense to pass the PBQP VRAI. The existing behavior had
806 // LiveRangeEdit make its own VirtRegAuxInfo object.
807 VirtRegAuxInfo DefaultVRAI(MF, LIS, VRM, getAnalysis<MachineLoopInfo>(),
808 MBFI);
809 std::unique_ptr<Spiller> VRegSpiller(
810 createInlineSpiller(*this, MF, VRM, DefaultVRAI));
811
812 MF.getRegInfo().freezeReservedRegs(MF);
813
814 LLVM_DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
815
816 // Allocator main loop:
817 //
818 // * Map current regalloc problem to a PBQP problem
819 // * Solve the PBQP problem
820 // * Map the solution back to a register allocation
821 // * Spill if necessary
822 //
823 // This process is continued till no more spills are generated.
824
825 // Find the vreg intervals in need of allocation.
826 findVRegIntervalsToAlloc(MF, LIS);
827
828#ifndef NDEBUG
829 const Function &F = MF.getFunction();
830 std::string FullyQualifiedName =
831 F.getParent()->getModuleIdentifier() + "." + F.getName().str();
832#endif
833
834 // If there are non-empty intervals allocate them using pbqp.
835 if (!VRegsToAlloc.empty()) {
836 const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
837 std::unique_ptr<PBQPRAConstraintList> ConstraintsRoot =
838 std::make_unique<PBQPRAConstraintList>();
839 ConstraintsRoot->addConstraint(std::make_unique<SpillCosts>());
840 ConstraintsRoot->addConstraint(std::make_unique<Interference>());
841 if (PBQPCoalescing)
842 ConstraintsRoot->addConstraint(std::make_unique<Coalescing>());
843 ConstraintsRoot->addConstraint(Subtarget.getCustomPBQPConstraints());
844
845 bool PBQPAllocComplete = false;
846 unsigned Round = 0;
847
848 while (!PBQPAllocComplete) {
849 LLVM_DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
850
851 PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
852 initializeGraph(G, VRM, *VRegSpiller);
853 ConstraintsRoot->apply(G);
854
855#ifndef NDEBUG
856 if (PBQPDumpGraphs) {
857 std::ostringstream RS;
858 RS << Round;
859 std::string GraphFileName = FullyQualifiedName + "." + RS.str() +
860 ".pbqpgraph";
861 std::error_code EC;
862 raw_fd_ostream OS(GraphFileName, EC, sys::fs::OF_TextWithCRLF);
863 LLVM_DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
864 << GraphFileName << "\"\n");
865 G.dump(OS);
866 }
867#endif
868
869 PBQP::Solution Solution = PBQP::RegAlloc::solve(G);
870 PBQPAllocComplete = mapPBQPToRegAlloc(G, Solution, VRM, *VRegSpiller);
871 ++Round;
872 }
873 }
874
875 // Finalise allocation, allocate empty ranges.
876 finalizeAlloc(MF, LIS, VRM);
877 postOptimization(*VRegSpiller, LIS);
878 VRegsToAlloc.clear();
879 EmptyIntervalVRegs.clear();
880
881 LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
882
883 return true;
884}
885
886/// Create Printable object for node and register info.
887static Printable PrintNodeInfo(PBQP::RegAlloc::PBQPRAGraph::NodeId NId,
888 const PBQP::RegAlloc::PBQPRAGraph &G) {
889 return Printable([NId, &G](raw_ostream &OS) {
890 const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
891 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
892 Register VReg = G.getNodeMetadata(NId).getVReg();
893 const char *RegClassName = TRI->getRegClassName(MRI.getRegClass(VReg));
894 OS << NId << " (" << RegClassName << ':' << printReg(VReg, TRI) << ')';
895 });
896}
897
898#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
899LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump(raw_ostream &OS) const {
900 for (auto NId : nodeIds()) {
901 const Vector &Costs = getNodeCosts(NId);
902 assert(Costs.getLength() != 0 && "Empty vector in graph.");
903 OS << PrintNodeInfo(NId, *this) << ": " << Costs << '\n';
904 }
905 OS << '\n';
906
907 for (auto EId : edgeIds()) {
908 NodeId N1Id = getEdgeNode1Id(EId);
909 NodeId N2Id = getEdgeNode2Id(EId);
910 assert(N1Id != N2Id && "PBQP graphs should not have self-edges.");
911 const Matrix &M = getEdgeCosts(EId);
912 assert(M.getRows() != 0 && "No rows in matrix.");
913 assert(M.getCols() != 0 && "No cols in matrix.");
914 OS << PrintNodeInfo(N1Id, *this) << ' ' << M.getRows() << " rows / ";
915 OS << PrintNodeInfo(N2Id, *this) << ' ' << M.getCols() << " cols:\n";
916 OS << M << '\n';
917 }
918}
919
920LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump() const {
921 dump(dbgs());
922}
923#endif
924
925void PBQP::RegAlloc::PBQPRAGraph::printDot(raw_ostream &OS) const {
926 OS << "graph {\n";
927 for (auto NId : nodeIds()) {
928 OS << " node" << NId << " [ label=\""
929 << PrintNodeInfo(NId, *this) << "\\n"
930 << getNodeCosts(NId) << "\" ]\n";
931 }
932
933 OS << " edge [ len=" << nodeIds().size() << " ]\n";
934 for (auto EId : edgeIds()) {
935 OS << " node" << getEdgeNode1Id(EId)
936 << " -- node" << getEdgeNode2Id(EId)
937 << " [ label=\"";
938 const Matrix &EdgeCosts = getEdgeCosts(EId);
939 for (unsigned i = 0; i < EdgeCosts.getRows(); ++i) {
940 OS << EdgeCosts.getRowAsVector(i) << "\\n";
941 }
942 OS << "\" ]\n";
943 }
944 OS << "}\n";
945}
946
947FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
948 return new RegAllocPBQP(customPassID);
949}
950
951FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
952 return createPBQPRegisterAllocator();
953}

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/PBQP/Graph.h

1//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// PBQP Graph class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
14#define LLVM_CODEGEN_PBQP_GRAPH_H
15
16#include "llvm/ADT/STLExtras.h"
17#include <algorithm>
18#include <cassert>
19#include <iterator>
20#include <limits>
21#include <vector>
22
23namespace llvm {
24namespace PBQP {
25
26 class GraphBase {
27 public:
28 using NodeId = unsigned;
29 using EdgeId = unsigned;
30
31 /// Returns a value representing an invalid (non-existent) node.
32 static NodeId invalidNodeId() {
33 return std::numeric_limits<NodeId>::max();
34 }
35
36 /// Returns a value representing an invalid (non-existent) edge.
37 static EdgeId invalidEdgeId() {
38 return std::numeric_limits<EdgeId>::max();
39 }
40 };
41
42 /// PBQP Graph class.
43 /// Instances of this class describe PBQP problems.
44 ///
45 template <typename SolverT>
46 class Graph : public GraphBase {
47 private:
48 using CostAllocator = typename SolverT::CostAllocator;
49
50 public:
51 using RawVector = typename SolverT::RawVector;
52 using RawMatrix = typename SolverT::RawMatrix;
53 using Vector = typename SolverT::Vector;
54 using Matrix = typename SolverT::Matrix;
55 using VectorPtr = typename CostAllocator::VectorPtr;
56 using MatrixPtr = typename CostAllocator::MatrixPtr;
57 using NodeMetadata = typename SolverT::NodeMetadata;
58 using EdgeMetadata = typename SolverT::EdgeMetadata;
59 using GraphMetadata = typename SolverT::GraphMetadata;
60
61 private:
62 class NodeEntry {
63 public:
64 using AdjEdgeList = std::vector<EdgeId>;
65 using AdjEdgeIdx = AdjEdgeList::size_type;
66 using AdjEdgeItr = AdjEdgeList::const_iterator;
67
68 NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
69
70 static AdjEdgeIdx getInvalidAdjEdgeIdx() {
71 return std::numeric_limits<AdjEdgeIdx>::max();
72 }
73
74 AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
75 AdjEdgeIdx Idx = AdjEdgeIds.size();
76 AdjEdgeIds.push_back(EId);
77 return Idx;
78 }
79
80 void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
81 // Swap-and-pop for fast removal.
82 // 1) Update the adj index of the edge currently at back().
83 // 2) Move last Edge down to Idx.
84 // 3) pop_back()
85 // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
86 // redundant, but both operations are cheap.
87 G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
88 AdjEdgeIds[Idx] = AdjEdgeIds.back();
89 AdjEdgeIds.pop_back();
90 }
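
The swap-and-pop trick above, in isolation (the real version additionally updates the moved edge's stored adjacency index via setAdjEdgeIdx; this sketch keeps only the vector manipulation):

    #include <cstdio>
    #include <vector>

    // Generic O(1) unordered removal: overwrite the removed slot with the last
    // element, then shrink. Order is not preserved, which is fine for an
    // adjacency set.
    static void swapAndPop(std::vector<unsigned> &V, size_t Idx) {
      V[Idx] = V.back(); // harmless self-assignment when Idx == V.size() - 1
      V.pop_back();
    }

    int main() {
      std::vector<unsigned> Ids = {7, 8, 9, 10};
      swapAndPop(Ids, 1); // removes 8; 10 moves into its slot
      for (unsigned Id : Ids)
        std::printf("%u ", Id); // prints 7 10 9
      std::printf("\n");
      return 0;
    }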
91
92 const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
93
94 VectorPtr Costs;
95 NodeMetadata Metadata;
96
97 private:
98 AdjEdgeList AdjEdgeIds;
99 };
100
101 class EdgeEntry {
102 public:
103 EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
104 : Costs(std::move(Costs)) {
105 NIds[0] = N1Id;
106 NIds[1] = N2Id;
107 ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
108 ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
109 }
110
111 void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
112 assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
113 "Edge already connected to NIds[NIdx].");
114 NodeEntry &N = G.getNode(NIds[NIdx]);
115 ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
116 }
117
118 void connect(Graph &G, EdgeId ThisEdgeId) {
119 connectToN(G, ThisEdgeId, 0);
120 connectToN(G, ThisEdgeId, 1);
121 }
122
123 void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
124 if (NId == NIds[0])
125 ThisEdgeAdjIdxs[0] = NewIdx;
126 else {
127 assert(NId == NIds[1] && "Edge not connected to NId");
128 ThisEdgeAdjIdxs[1] = NewIdx;
129 }
130 }
131
132 void disconnectFromN(Graph &G, unsigned NIdx) {
133 assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
134 "Edge not connected to NIds[NIdx].");
135 NodeEntry &N = G.getNode(NIds[NIdx]);
136 N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
137 ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
138 }
139
140 void disconnectFrom(Graph &G, NodeId NId) {
141 if (NId == NIds[0])
142 disconnectFromN(G, 0);
143 else {
144 assert(NId == NIds[1] && "Edge does not connect NId");
145 disconnectFromN(G, 1);
146 }
147 }
148
149 NodeId getN1Id() const { return NIds[0]; }
150 NodeId getN2Id() const { return NIds[1]; }
151
152 MatrixPtr Costs;
153 EdgeMetadata Metadata;
154
155 private:
156 NodeId NIds[2];
157 typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
158 };
159
160 // ----- MEMBERS -----
161
162 GraphMetadata Metadata;
163 CostAllocator CostAlloc;
164 SolverT *Solver = nullptr;
165
166 using NodeVector = std::vector<NodeEntry>;
167 using FreeNodeVector = std::vector<NodeId>;
168 NodeVector Nodes;
169 FreeNodeVector FreeNodeIds;
170
171 using EdgeVector = std::vector<EdgeEntry>;
172 using FreeEdgeVector = std::vector<EdgeId>;
173 EdgeVector Edges;
174 FreeEdgeVector FreeEdgeIds;
175
176 Graph(const Graph &Other) {}
177
178 // ----- INTERNAL METHODS -----
179
180 NodeEntry &getNode(NodeId NId) {
181 assert(NId < Nodes.size() && "Out of bound NodeId");
182 return Nodes[NId];
183 }
184 const NodeEntry &getNode(NodeId NId) const {
185 assert(NId < Nodes.size() && "Out of bound NodeId");
186 return Nodes[NId];
187 }
188
189 EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
190 const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
191
192 NodeId addConstructedNode(NodeEntry N) {
193 NodeId NId = 0;
194 if (!FreeNodeIds.empty()) {
195 NId = FreeNodeIds.back();
196 FreeNodeIds.pop_back();
197 Nodes[NId] = std::move(N);
198 } else {
199 NId = Nodes.size();
200 Nodes.push_back(std::move(N));
201 }
202 return NId;
203 }
204
205 EdgeId addConstructedEdge(EdgeEntry E) {
206 assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
207 "Attempt to add duplicate edge.");
208 EdgeId EId = 0;
209 if (!FreeEdgeIds.empty()) {
210 EId = FreeEdgeIds.back();
211 FreeEdgeIds.pop_back();
212 Edges[EId] = std::move(E);
213 } else {
214 EId = Edges.size();
215 Edges.push_back(std::move(E));
216 }
217
218 EdgeEntry &NE = getEdge(EId);
219
220 // Add the edge to the adjacency sets of its nodes.
221 NE.connect(*this, EId);
222 return EId;
223 }
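
addConstructedNode and addConstructedEdge share an id-recycling scheme: freed ids go on a free list and are reused before the backing vector grows. A minimal sketch of that scheme, with hypothetical names:

    #include <cstdio>
    #include <vector>

    // Ids of removed entries are pushed onto FreeIds and handed back out
    // before Slots is extended, so ids stay dense.
    struct IdPool {
      std::vector<int> Slots;        // stand-in for Nodes/Edges
      std::vector<unsigned> FreeIds; // stand-in for FreeNodeIds/FreeEdgeIds

      unsigned add(int Value) {
        if (!FreeIds.empty()) {
          unsigned Id = FreeIds.back();
          FreeIds.pop_back();
          Slots[Id] = Value;
          return Id;
        }
        Slots.push_back(Value);
        return Slots.size() - 1;
      }
      void remove(unsigned Id) { FreeIds.push_back(Id); }
    };

    int main() {
      IdPool P;
      unsigned A = P.add(100), B = P.add(200);
      P.remove(A);
      unsigned C = P.add(300);                  // reuses A's id
      std::printf("A=%u B=%u C=%u\n", A, B, C); // A=0 B=1 C=0
      return 0;
    }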
224
225 void operator=(const Graph &Other) {}
226
227 public:
228 using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
229
230 class NodeItr {
231 public:
232 using iterator_category = std::forward_iterator_tag;
233 using value_type = NodeId;
234 using difference_type = int;
235 using pointer = NodeId *;
236 using reference = NodeId &;
237
238 NodeItr(NodeId CurNId, const Graph &G)
239 : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
240 this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
241 }
242
243 bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
244 bool operator!=(const NodeItr &O) const { return !(*this == O); }
245 NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
246 NodeId operator*() const { return CurNId; }
247
248 private:
249 NodeId findNextInUse(NodeId NId) const {
250 while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
251 ++NId;
252 }
253 return NId;
254 }
255
256 NodeId CurNId, EndNId;
257 const FreeNodeVector &FreeNodeIds;
258 };
259
260 class EdgeItr {
261 public:
262 EdgeItr(EdgeId CurEId, const Graph &G)
263 : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
264 this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
265 }
266
267 bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
268 bool operator!=(const EdgeItr &O) const { return !(*this == O); }
269 EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
270 EdgeId operator*() const { return CurEId; }
271
272 private:
273 EdgeId findNextInUse(EdgeId EId) const {
274 while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
275 ++EId;
276 }
277 return EId;
278 }
279
280 EdgeId CurEId, EndEId;
281 const FreeEdgeVector &FreeEdgeIds;
282 };
283
284 class NodeIdSet {
285 public:
286 NodeIdSet(const Graph &G) : G(G) {}
287
288 NodeItr begin() const { return NodeItr(0, G); }
289 NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
290
291 bool empty() const { return G.Nodes.empty(); }
292
293 typename NodeVector::size_type size() const {
294 return G.Nodes.size() - G.FreeNodeIds.size();
295 }
296
297 private:
298 const Graph& G;
299 };
300
301 class EdgeIdSet {
302 public:
303 EdgeIdSet(const Graph &G) : G(G) {}
304
305 EdgeItr begin() const { return EdgeItr(0, G); }
306 EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
307
308 bool empty() const { return G.Edges.empty(); }
309
310 typename NodeVector::size_type size() const {
311 return G.Edges.size() - G.FreeEdgeIds.size();
312 }
313
314 private:
315 const Graph& G;
316 };
317
318 class AdjEdgeIdSet {
319 public:
320 AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
321
322 typename NodeEntry::AdjEdgeItr begin() const {
323 return NE.getAdjEdgeIds().begin();
324 }
325
326 typename NodeEntry::AdjEdgeItr end() const {
327 return NE.getAdjEdgeIds().end();
328 }
329
330 bool empty() const { return NE.getAdjEdgeIds().empty(); }
331
332 typename NodeEntry::AdjEdgeList::size_type size() const {
333 return NE.getAdjEdgeIds().size();
334 }
335
336 private:
337 const NodeEntry &NE;
338 };
339
340 /// Construct an empty PBQP graph.
341 Graph() = default;
342
343 /// Construct an empty PBQP graph with the given graph metadata.
344 Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
345
346 /// Get a reference to the graph metadata.
347 GraphMetadata& getMetadata() { return Metadata; }
348
349 /// Get a const-reference to the graph metadata.
350 const GraphMetadata& getMetadata() const { return Metadata; }
351
352 /// Lock this graph to the given solver instance in preparation
353 /// for running the solver. This method will call solver.handleAddNode for
354 /// each node in the graph, and handleAddEdge for each edge, to give the
355 /// solver an opportunity to set up any required metadata.
356 void setSolver(SolverT &S) {
357 assert(!Solver && "Solver already set. Call unsetSolver().");
358 Solver = &S;
359 for (auto NId : nodeIds())
360 Solver->handleAddNode(NId);
361 for (auto EId : edgeIds())
362 Solver->handleAddEdge(EId);
363 }
364
365 /// Release from solver instance.
366 void unsetSolver() {
367      assert(Solver && "Solver not set.");
368 Solver = nullptr;
369 }
370
371 /// Add a node with the given costs.
372 /// @param Costs Cost vector for the new node.
373    /// @return Node id for the added node.
374 template <typename OtherVectorT>
375 NodeId addNode(OtherVectorT Costs) {
376 // Get cost vector from the problem domain
377 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
378 NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
379 if (Solver)
380 Solver->handleAddNode(NId);
381 return NId;
382 }
383
384 /// Add a node bypassing the cost allocator.
385 /// @param Costs Cost vector ptr for the new node (must be convertible to
386 /// VectorPtr).
387    /// @return Node id for the added node.
388 ///
389 /// This method allows for fast addition of a node whose costs don't need
390 /// to be passed through the cost allocator. The most common use case for
391 /// this is when duplicating costs from an existing node (when using a
392 /// pooling allocator). These have already been uniqued, so we can avoid
393 /// re-constructing and re-uniquing them by attaching them directly to the
394 /// new node.
395 template <typename OtherVectorPtrT>
396 NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
397 NodeId NId = addConstructedNode(NodeEntry(Costs));
398 if (Solver)
399 Solver->handleAddNode(NId);
400 return NId;
401 }
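       // Illustrative sketch (not in the original header), assuming an
       // existing graph G and node Old: duplicate Old's already-uniqued
       // costs without re-pooling them.
       //   NodeId Copy = G.addNodeBypassingCostAllocator(G.getNodeCostsPtr(Old));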
402
403 /// Add an edge between the given nodes with the given costs.
404 /// @param N1Id First node.
405 /// @param N2Id Second node.
406 /// @param Costs Cost matrix for new edge.
407    /// @return Edge id for the added edge.
408 template <typename OtherVectorT>
409 EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherVectorT Costs) {
410      assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
411             getNodeCosts(N2Id).getLength() == Costs.getCols() &&
412             "Matrix dimensions mismatch.");

7. Assuming the condition is true
8. Assuming the condition is true
9. '?' condition is true
413 // Get cost matrix from the problem domain.
414 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
10. Calling 'PoolCostAllocator::getMatrix'
415 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
416 if (Solver)
417 Solver->handleAddEdge(EId);
418 return EId;
419 }
420
421 /// Add an edge bypassing the cost allocator.
422 /// @param N1Id First node.
423 /// @param N2Id Second node.
424 /// @param Costs Cost matrix for new edge.
425    /// @return Edge id for the added edge.
426 ///
427 /// This method allows for fast addition of an edge whose costs don't need
428 /// to be passed through the cost allocator. The most common use case for
429 /// this is when duplicating costs from an existing edge (when using a
430 /// pooling allocator). These have already been uniqued, so we can avoid
431 /// re-constructing and re-uniquing them by attaching them directly to the
432 /// new edge.
433 template <typename OtherMatrixPtrT>
434    EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
435 OtherMatrixPtrT Costs) {
436      assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
437             getNodeCosts(N2Id).getLength() == Costs->getCols() &&
438             "Matrix dimensions mismatch.");
439 // Get cost matrix from the problem domain.
440 EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
441 if (Solver)
442 Solver->handleAddEdge(EId);
443 return EId;
444 }
445
446 /// Returns true if the graph is empty.
447 bool empty() const { return NodeIdSet(*this).empty(); }
448
449 NodeIdSet nodeIds() const { return NodeIdSet(*this); }
450 EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
451
452 AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
453
454 /// Get the number of nodes in the graph.
455 /// @return Number of nodes in the graph.
456 unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
457
458 /// Get the number of edges in the graph.
459 /// @return Number of edges in the graph.
460 unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
461
462 /// Set a node's cost vector.
463 /// @param NId Node to update.
464 /// @param Costs New costs to set.
465 template <typename OtherVectorT>
466 void setNodeCosts(NodeId NId, OtherVectorT Costs) {
467 VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
468 if (Solver)
469 Solver->handleSetNodeCosts(NId, *AllocatedCosts);
470 getNode(NId).Costs = AllocatedCosts;
471 }
472
473 /// Get a VectorPtr to a node's cost vector. Rarely useful - use
474 /// getNodeCosts where possible.
475 /// @param NId Node id.
476 /// @return VectorPtr to node cost vector.
477 ///
478 /// This method is primarily useful for duplicating costs quickly by
479 /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
480 /// getNodeCosts when dealing with node cost values.
481 const VectorPtr& getNodeCostsPtr(NodeId NId) const {
482 return getNode(NId).Costs;
483 }
484
485 /// Get a node's cost vector.
486 /// @param NId Node id.
487 /// @return Node cost vector.
488 const Vector& getNodeCosts(NodeId NId) const {
489 return *getNodeCostsPtr(NId);
490 }
491
492 NodeMetadata& getNodeMetadata(NodeId NId) {
493 return getNode(NId).Metadata;
494 }
495
496 const NodeMetadata& getNodeMetadata(NodeId NId) const {
497 return getNode(NId).Metadata;
498 }
499
500 typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
501 return getNode(NId).getAdjEdgeIds().size();
502 }
503
504 /// Update an edge's cost matrix.
505 /// @param EId Edge id.
506 /// @param Costs New cost matrix.
507 template <typename OtherMatrixT>
508 void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
509 MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
510 if (Solver)
511 Solver->handleUpdateCosts(EId, *AllocatedCosts);
512 getEdge(EId).Costs = AllocatedCosts;
513 }
514
515    /// Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
516 /// getEdgeCosts where possible.
517 /// @param EId Edge id.
518 /// @return MatrixPtr to edge cost matrix.
519 ///
520 /// This method is primarily useful for duplicating costs quickly by
521    /// bypassing the cost allocator. See addEdgeBypassingCostAllocator. Prefer
522 /// getEdgeCosts when dealing with edge cost values.
523 const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
524 return getEdge(EId).Costs;
525 }
526
527 /// Get an edge's cost matrix.
528 /// @param EId Edge id.
529 /// @return Edge cost matrix.
530 const Matrix& getEdgeCosts(EdgeId EId) const {
531 return *getEdge(EId).Costs;
532 }
533
534 EdgeMetadata& getEdgeMetadata(EdgeId EId) {
535 return getEdge(EId).Metadata;
536 }
537
538 const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
539 return getEdge(EId).Metadata;
540 }
541
542 /// Get the first node connected to this edge.
543 /// @param EId Edge id.
544 /// @return The first node connected to the given edge.
545 NodeId getEdgeNode1Id(EdgeId EId) const {
546 return getEdge(EId).getN1Id();
547 }
548
549 /// Get the second node connected to this edge.
550 /// @param EId Edge id.
551 /// @return The second node connected to the given edge.
552 NodeId getEdgeNode2Id(EdgeId EId) const {
553 return getEdge(EId).getN2Id();
554 }
555
556 /// Get the "other" node connected to this edge.
557 /// @param EId Edge id.
558 /// @param NId Node id for the "given" node.
559    /// @return The id of the "other" node connected to this edge.
560 NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
561 EdgeEntry &E = getEdge(EId);
562 if (E.getN1Id() == NId) {
563 return E.getN2Id();
564 } // else
565 return E.getN1Id();
566 }
567
568 /// Get the edge connecting two nodes.
569 /// @param N1Id First node id.
570 /// @param N2Id Second node id.
571 /// @return An id for edge (N1Id, N2Id) if such an edge exists,
572 /// otherwise returns an invalid edge id.
573 EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
574 for (auto AEId : adjEdgeIds(N1Id)) {
575 if ((getEdgeNode1Id(AEId) == N2Id) ||
576 (getEdgeNode2Id(AEId) == N2Id)) {
577 return AEId;
578 }
579 }
580 return invalidEdgeId();
581 }
582
583 /// Remove a node from the graph.
584 /// @param NId Node id.
585 void removeNode(NodeId NId) {
586 if (Solver)
587 Solver->handleRemoveNode(NId);
588 NodeEntry &N = getNode(NId);
589 // TODO: Can this be for-each'd?
590 for (AdjEdgeItr AEItr = N.adjEdgesBegin(),
591 AEEnd = N.adjEdgesEnd();
592 AEItr != AEEnd;) {
593 EdgeId EId = *AEItr;
594 ++AEItr;
595 removeEdge(EId);
596 }
597 FreeNodeIds.push_back(NId);
598 }
599
600 /// Disconnect an edge from the given node.
601 ///
602 /// Removes the given edge from the adjacency list of the given node.
603 /// This operation leaves the edge in an 'asymmetric' state: It will no
604 /// longer appear in an iteration over the given node's (NId's) edges, but
605 /// will appear in an iteration over the 'other', unnamed node's edges.
606 ///
607 /// This does not correspond to any normal graph operation, but exists to
608 /// support efficient PBQP graph-reduction based solvers. It is used to
609 /// 'effectively' remove the unnamed node from the graph while the solver
610 /// is performing the reduction. The solver will later call reconnectNode
611 /// to restore the edge in the named node's adjacency list.
612 ///
613 /// Since the degree of a node is the number of connected edges,
614 /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
615 /// drop by 1.
616 ///
617 /// A disconnected edge WILL still appear in an iteration over the graph
618 /// edges.
619 ///
620 /// A disconnected edge should not be removed from the graph, it should be
621 /// reconnected first.
622 ///
623 /// A disconnected edge can be reconnected by calling the reconnectEdge
624 /// method.
625 void disconnectEdge(EdgeId EId, NodeId NId) {
626 if (Solver)
627 Solver->handleDisconnectEdge(EId, NId);
628
629 EdgeEntry &E = getEdge(EId);
630 E.disconnectFrom(*this, NId);
631 }
632
633 /// Convenience method to disconnect all neighbours from the given
634 /// node.
635 void disconnectAllNeighborsFromNode(NodeId NId) {
636 for (auto AEId : adjEdgeIds(NId))
637 disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
638 }
639
640 /// Re-attach an edge to its nodes.
641 ///
642 /// Adds an edge that had been previously disconnected back into the
643 /// adjacency set of the nodes that the edge connects.
644 void reconnectEdge(EdgeId EId, NodeId NId) {
645 EdgeEntry &E = getEdge(EId);
646 E.connectTo(*this, EId, NId);
647 if (Solver)
648 Solver->handleReconnectEdge(EId, NId);
649 }
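       // Illustrative sketch of the reduction protocol described above
       // (assumed usage, not part of this header):
       //   G.disconnectAllNeighborsFromNode(NId); // hide NId during reduction
       //   // ... solve the reduced problem ...
       //   for (auto AEId : G.adjEdgeIds(NId))    // NId's own list is intact
       //     G.reconnectEdge(AEId, G.getEdgeOtherNodeId(AEId, NId));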
650
651 /// Remove an edge from the graph.
652 /// @param EId Edge id.
653 void removeEdge(EdgeId EId) {
654 if (Solver)
655 Solver->handleRemoveEdge(EId);
656 EdgeEntry &E = getEdge(EId);
657 E.disconnect();
658 FreeEdgeIds.push_back(EId);
659 Edges[EId].invalidate();
660 }
661
662 /// Remove all nodes and edges from the graph.
663 void clear() {
664 Nodes.clear();
665 FreeNodeIds.clear();
666 Edges.clear();
667 FreeEdgeIds.clear();
668 }
669 };
670
671} // end namespace PBQP
672} // end namespace llvm
673
674#endif // LLVM_CODEGEN_PBQP_GRAPH_H
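
The NodeItr/EdgeItr classes above walk dense id vectors while skipping ids
that sit on a free list. The following is a minimal standalone sketch of that
id-recycling pattern (illustrative only; IdTable and nextInUse are names
invented for this sketch, not LLVM APIs):

#include <algorithm>
#include <cstdio>
#include <vector>

// Ids index a dense payload vector; removed ids go on a free list and are
// skipped during iteration, as NodeItr::findNextInUse does above.
struct IdTable {
  std::vector<int> Slots;        // payloads, indexed by id
  std::vector<unsigned> FreeIds; // recycled ids

  unsigned add(int V) {
    if (!FreeIds.empty()) {      // reuse a hole before growing the vector
      unsigned Id = FreeIds.back();
      FreeIds.pop_back();
      Slots[Id] = V;
      return Id;
    }
    Slots.push_back(V);
    return static_cast<unsigned>(Slots.size() - 1);
  }

  void remove(unsigned Id) { FreeIds.push_back(Id); }

  bool isFree(unsigned Id) const {
    return std::find(FreeIds.begin(), FreeIds.end(), Id) != FreeIds.end();
  }

  // Equivalent of findNextInUse: advance past freed slots.
  unsigned nextInUse(unsigned Id) const {
    while (Id < Slots.size() && isFree(Id))
      ++Id;
    return Id;
  }
};

int main() {
  IdTable T;
  T.add(1);
  unsigned B = T.add(2);
  T.add(3);
  T.remove(B);
  for (unsigned Id = T.nextInUse(0); Id < T.Slots.size();
       Id = T.nextInUse(Id + 1))
    std::printf("id %u -> %d\n", Id, T.Slots[Id]); // prints ids 0 and 2
  return 0;
}

As in the header, each skip costs a linear scan of the free list (the
is_contained call), a simplicity-over-speed trade-off that keeps id reuse
cheap.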

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h

1//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Defines classes conforming to the PBQP cost value manager concept.
10//
11// Cost value managers are memory managers for PBQP cost values (vectors and
12// matrices). Since PBQP graphs can grow very large (e.g. hundreds of thousands
13// of edges on the largest function in SPEC2006), values are pooled and shared.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
18#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
19
20#include "llvm/ADT/DenseSet.h"
21#include <algorithm>
22#include <cstdint>
23#include <memory>
24
25namespace llvm {
26namespace PBQP {
27
28template <typename ValueT> class ValuePool {
29public:
30 using PoolRef = std::shared_ptr<const ValueT>;
31
32private:
33 class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
34 public:
35 template <typename ValueKeyT>
36 PoolEntry(ValuePool &Pool, ValueKeyT Value)
37 : Pool(Pool), Value(std::move(Value)) {}
22. Calling constructor for 'MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>'
38
39 ~PoolEntry() { Pool.removeEntry(this); }
40
41 const ValueT &getValue() const { return Value; }
42
43 private:
44 ValuePool &Pool;
45 ValueT Value;
46 };
47
48 class PoolEntryDSInfo {
49 public:
50 static inline PoolEntry *getEmptyKey() { return nullptr; }
51
52 static inline PoolEntry *getTombstoneKey() {
53 return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
54 }
55
56 template <typename ValueKeyT>
57 static unsigned getHashValue(const ValueKeyT &C) {
58 return hash_value(C);
59 }
60
61 static unsigned getHashValue(PoolEntry *P) {
62 return getHashValue(P->getValue());
63 }
64
65 static unsigned getHashValue(const PoolEntry *P) {
66 return getHashValue(P->getValue());
67 }
68
69 template <typename ValueKeyT1, typename ValueKeyT2>
70 static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
71 return C1 == C2;
72 }
73
74 template <typename ValueKeyT>
75 static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
76 if (P == getEmptyKey() || P == getTombstoneKey())
77 return false;
78 return isEqual(C, P->getValue());
79 }
80
81 static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
82 if (P1 == getEmptyKey() || P1 == getTombstoneKey())
83 return P1 == P2;
84 return isEqual(P1->getValue(), P2);
85 }
86 };
87
88 using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
89
90 EntrySetT EntrySet;
91
92 void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
93
94public:
95 template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
96 typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
97
98 if (I != EntrySet.end())
12. Taking false branch
99 return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
100
101 auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
13. Calling 'make_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>> &, llvm::PBQP::Matrix>'
102 EntrySet.insert(P.get());
103 return PoolRef(std::move(P), &P->getValue());
104 }
105};
106
107template <typename VectorT, typename MatrixT> class PoolCostAllocator {
108private:
109 using VectorCostPool = ValuePool<VectorT>;
110 using MatrixCostPool = ValuePool<MatrixT>;
111
112public:
113 using Vector = VectorT;
114 using Matrix = MatrixT;
115 using VectorPtr = typename VectorCostPool::PoolRef;
116 using MatrixPtr = typename MatrixCostPool::PoolRef;
117
118 template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
119 return VectorPool.getValue(std::move(v));
120 }
121
122 template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
123 return MatrixPool.getValue(std::move(m));
11
Calling 'ValuePool::getValue'
124 }
125
126private:
127 VectorCostPool VectorPool;
128 MatrixCostPool MatrixPool;
129};
130
131} // end namespace PBQP
132} // end namespace llvm
133
134#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
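
To make the pooling scheme concrete, here is a minimal standalone sketch (not
LLVM's code; SimplePool and its members are invented for illustration).
Entries unique themselves in a map, clients receive shared_ptrs built with the
aliasing constructor, and an entry removes itself from the pool when its last
reference drops, just as ~PoolEntry does above:

#include <cassert>
#include <memory>
#include <string>
#include <unordered_map>

template <typename ValueT> class SimplePool {
  struct Entry : std::enable_shared_from_this<Entry> {
    Entry(SimplePool &P, ValueT V) : Pool(P), Value(std::move(V)) {}
    ~Entry() { Pool.Map.erase(Value); } // self-removal, like ~PoolEntry
    SimplePool &Pool;
    ValueT Value;
  };

  // Non-owning pointers: ownership lives in the shared_ptrs handed out.
  std::unordered_map<ValueT, Entry *> Map;

public:
  using PoolRef = std::shared_ptr<const ValueT>;

  PoolRef get(ValueT Key) {
    auto I = Map.find(Key);
    if (I != Map.end()) // reuse the uniqued value
      return PoolRef(I->second->shared_from_this(), &I->second->Value);
    auto E = std::make_shared<Entry>(*this, std::move(Key));
    Map.emplace(E->Value, E.get());
    return PoolRef(E, &E->Value); // aliasing constructor
  }
};

int main() {
  SimplePool<std::string> Pool;
  auto A = Pool.get("spill-costs");
  auto B = Pool.get("spill-costs"); // same underlying allocation as A
  assert(A.get() == B.get());
  return 0;
}

Unlike ValuePool, this sketch stores a copy of the value as the map key; the
real code avoids that duplication with DenseSet::find_as and a hashing trait
(PoolEntryDSInfo).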

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/shared_ptr.h

1// shared_ptr and weak_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_H
50#define _SHARED_PTR_H 1
51
52#include <bits/shared_ptr_base.h>
53
54namespace std _GLIBCXX_VISIBILITY(default)
55{
56_GLIBCXX_BEGIN_NAMESPACE_VERSION
57
58 /**
59 * @addtogroup pointer_abstractions
60 * @{
61 */
62
63 /// 20.7.2.2.11 shared_ptr I/O
64 template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
65 inline std::basic_ostream<_Ch, _Tr>&
66 operator<<(std::basic_ostream<_Ch, _Tr>& __os,
67 const __shared_ptr<_Tp, _Lp>& __p)
68 {
69 __os << __p.get();
70 return __os;
71 }
72
73 /// 20.7.2.2.10 shared_ptr get_deleter
74 template<typename _Del, typename _Tp, _Lock_policy _Lp>
75 inline _Del*
76 get_deleter(const __shared_ptr<_Tp, _Lp>& __p) noexcept
77 {
78#if __cpp_rtti
79 return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del)));
80#else
81 return 0;
82#endif
83 }
84
85
86 /**
87 * @brief A smart pointer with reference-counted copy semantics.
88 *
89 * The object pointed to is deleted when the last shared_ptr pointing to
90 * it is destroyed or reset.
91 */
92 template<typename _Tp>
93 class shared_ptr : public __shared_ptr<_Tp>
94 {
95 template<typename _Ptr>
96 using _Convertible
97 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
98
99 public:
100 /**
101 * @brief Construct an empty %shared_ptr.
102 * @post use_count()==0 && get()==0
103 */
104 constexpr shared_ptr() noexcept
105 : __shared_ptr<_Tp>() { }
106
107 shared_ptr(const shared_ptr&) noexcept = default;
108
109 /**
110 * @brief Construct a %shared_ptr that owns the pointer @a __p.
111 * @param __p A pointer that is convertible to element_type*.
112 * @post use_count() == 1 && get() == __p
113 * @throw std::bad_alloc, in which case @c delete @a __p is called.
114 */
115 template<typename _Tp1>
116 explicit shared_ptr(_Tp1* __p)
117 : __shared_ptr<_Tp>(__p) { }
118
119 /**
120 * @brief Construct a %shared_ptr that owns the pointer @a __p
121 * and the deleter @a __d.
122 * @param __p A pointer.
123 * @param __d A deleter.
124 * @post use_count() == 1 && get() == __p
125 * @throw std::bad_alloc, in which case @a __d(__p) is called.
126 *
127 * Requirements: _Deleter's copy constructor and destructor must
128 * not throw
129 *
130 * __shared_ptr will release __p by calling __d(__p)
131 */
132 template<typename _Tp1, typename _Deleter>
133 shared_ptr(_Tp1* __p, _Deleter __d)
134 : __shared_ptr<_Tp>(__p, __d) { }
135
136 /**
137 * @brief Construct a %shared_ptr that owns a null pointer
138 * and the deleter @a __d.
139 * @param __p A null pointer constant.
140 * @param __d A deleter.
141 * @post use_count() == 1 && get() == __p
142 * @throw std::bad_alloc, in which case @a __d(__p) is called.
143 *
144 * Requirements: _Deleter's copy constructor and destructor must
145 * not throw
146 *
147 * The last owner will call __d(__p)
148 */
149 template<typename _Deleter>
150 shared_ptr(nullptr_t __p, _Deleter __d)
151 : __shared_ptr<_Tp>(__p, __d) { }
152
153 /**
154 * @brief Construct a %shared_ptr that owns the pointer @a __p
155 * and the deleter @a __d.
156 * @param __p A pointer.
157 * @param __d A deleter.
158 * @param __a An allocator.
159 * @post use_count() == 1 && get() == __p
160 * @throw std::bad_alloc, in which case @a __d(__p) is called.
161 *
162 * Requirements: _Deleter's copy constructor and destructor must
163 * not throw _Alloc's copy constructor and destructor must not
164 * throw.
165 *
166 * __shared_ptr will release __p by calling __d(__p)
167 */
168 template<typename _Tp1, typename _Deleter, typename _Alloc>
169 shared_ptr(_Tp1* __p, _Deleter __d, _Alloc __a)
170 : __shared_ptr<_Tp>(__p, __d, std::move(__a)) { }
171
172 /**
173 * @brief Construct a %shared_ptr that owns a null pointer
174 * and the deleter @a __d.
175 * @param __p A null pointer constant.
176 * @param __d A deleter.
177 * @param __a An allocator.
178 * @post use_count() == 1 && get() == __p
179 * @throw std::bad_alloc, in which case @a __d(__p) is called.
180 *
181 * Requirements: _Deleter's copy constructor and destructor must
182 * not throw _Alloc's copy constructor and destructor must not
183 * throw.
184 *
185 * The last owner will call __d(__p)
186 */
187 template<typename _Deleter, typename _Alloc>
188 shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
189 : __shared_ptr<_Tp>(__p, __d, std::move(__a)) { }
190
191 // Aliasing constructor
192
193 /**
194 * @brief Constructs a %shared_ptr instance that stores @a __p
195 * and shares ownership with @a __r.
196 * @param __r A %shared_ptr.
197 * @param __p A pointer that will remain valid while @a *__r is valid.
198 * @post get() == __p && use_count() == __r.use_count()
199 *
200 * This can be used to construct a @c shared_ptr to a sub-object
201 * of an object managed by an existing @c shared_ptr.
202 *
203 * @code
204 * shared_ptr< pair<int,int> > pii(new pair<int,int>());
205 * shared_ptr<int> pi(pii, &pii->first);
206 * assert(pii.use_count() == 2);
207 * @endcode
208 */
209 template<typename _Tp1>
210 shared_ptr(const shared_ptr<_Tp1>& __r, _Tp* __p) noexcept
211 : __shared_ptr<_Tp>(__r, __p) { }
212
213 /**
214 * @brief If @a __r is empty, constructs an empty %shared_ptr;
215 * otherwise construct a %shared_ptr that shares ownership
216 * with @a __r.
217 * @param __r A %shared_ptr.
218 * @post get() == __r.get() && use_count() == __r.use_count()
219 */
220 template<typename _Tp1, typename = _Convertible<_Tp1*>>
221 shared_ptr(const shared_ptr<_Tp1>& __r) noexcept
222 : __shared_ptr<_Tp>(__r) { }
223
224 /**
225 * @brief Move-constructs a %shared_ptr instance from @a __r.
226 * @param __r A %shared_ptr rvalue.
227 * @post *this contains the old value of @a __r, @a __r is empty.
228 */
229 shared_ptr(shared_ptr&& __r) noexcept
230 : __shared_ptr<_Tp>(std::move(__r)) { }
231
232 /**
233 * @brief Move-constructs a %shared_ptr instance from @a __r.
234 * @param __r A %shared_ptr rvalue.
235 * @post *this contains the old value of @a __r, @a __r is empty.
236 */
237 template<typename _Tp1, typename = _Convertible<_Tp1*>>
238 shared_ptr(shared_ptr<_Tp1>&& __r) noexcept
239 : __shared_ptr<_Tp>(std::move(__r)) { }
240
241 /**
242 * @brief Constructs a %shared_ptr that shares ownership with @a __r
243 * and stores a copy of the pointer stored in @a __r.
244 * @param __r A weak_ptr.
245 * @post use_count() == __r.use_count()
246 * @throw bad_weak_ptr when __r.expired(),
247 * in which case the constructor has no effect.
248 */
249 template<typename _Tp1>
250 explicit shared_ptr(const weak_ptr<_Tp1>& __r)
251 : __shared_ptr<_Tp>(__r) { }
252
253#if _GLIBCXX_USE_DEPRECATED
254 template<typename _Tp1>
255 shared_ptr(std::auto_ptr<_Tp1>&& __r);
256#endif
257
258 // _GLIBCXX_RESOLVE_LIB_DEFECTS
259 // 2399. shared_ptr's constructor from unique_ptr should be constrained
260 template<typename _Tp1, typename _Del, typename
261 = _Convertible<typename unique_ptr<_Tp1, _Del>::pointer>>
262 shared_ptr(std::unique_ptr<_Tp1, _Del>&& __r)
263 : __shared_ptr<_Tp>(std::move(__r)) { }
264
265 /**
266 * @brief Construct an empty %shared_ptr.
267 * @post use_count() == 0 && get() == nullptr
268 */
269 constexpr shared_ptr(nullptr_t) noexcept : shared_ptr() { }
270
271 shared_ptr& operator=(const shared_ptr&) noexcept = default;
272
273 template<typename _Tp1>
274 shared_ptr&
275 operator=(const shared_ptr<_Tp1>& __r) noexcept
276 {
277 this->__shared_ptr<_Tp>::operator=(__r);
278 return *this;
279 }
280
281#if _GLIBCXX_USE_DEPRECATED
282 template<typename _Tp1>
283 shared_ptr&
284 operator=(std::auto_ptr<_Tp1>&& __r)
285 {
286 this->__shared_ptr<_Tp>::operator=(std::move(__r));
287 return *this;
288 }
289#endif
290
291 shared_ptr&
292 operator=(shared_ptr&& __r) noexcept
293 {
294 this->__shared_ptr<_Tp>::operator=(std::move(__r));
295 return *this;
296 }
297
298 template<class _Tp1>
299 shared_ptr&
300 operator=(shared_ptr<_Tp1>&& __r) noexcept
301 {
302 this->__shared_ptr<_Tp>::operator=(std::move(__r));
303 return *this;
304 }
305
306 template<typename _Tp1, typename _Del>
307 shared_ptr&
308 operator=(std::unique_ptr<_Tp1, _Del>&& __r)
309 {
310 this->__shared_ptr<_Tp>::operator=(std::move(__r));
311 return *this;
312 }
313
314 private:
315 // This constructor is non-standard, it is used by allocate_shared.
316 template<typename _Alloc, typename... _Args>
317 shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
318 _Args&&... __args)
319 : __shared_ptr<_Tp>(__tag, __a, std::forward<_Args>(__args)...)
16. Calling constructor for '__shared_ptr<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, __gnu_cxx::_S_atomic>'
320 { }
321
322 template<typename _Tp1, typename _Alloc, typename... _Args>
323 friend shared_ptr<_Tp1>
324 allocate_shared(const _Alloc& __a, _Args&&... __args);
325
326 // This constructor is non-standard, it is used by weak_ptr::lock().
327 shared_ptr(const weak_ptr<_Tp>& __r, std::nothrow_t)
328 : __shared_ptr<_Tp>(__r, std::nothrow) { }
329
330 friend class weak_ptr<_Tp>;
331 };
332
333 // 20.7.2.2.7 shared_ptr comparisons
334 template<typename _Tp1, typename _Tp2>
335 inline bool
336 operator==(const shared_ptr<_Tp1>& __a,
337 const shared_ptr<_Tp2>& __b) noexcept
338 { return __a.get() == __b.get(); }
339
340 template<typename _Tp>
341 inline bool
342 operator==(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
343 { return !__a; }
344
345 template<typename _Tp>
346 inline bool
347 operator==(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
348 { return !__a; }
349
350 template<typename _Tp1, typename _Tp2>
351 inline bool
352 operator!=(const shared_ptr<_Tp1>& __a,
353 const shared_ptr<_Tp2>& __b) noexcept
354 { return __a.get() != __b.get(); }
355
356 template<typename _Tp>
357 inline bool
358 operator!=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
359 { return (bool)__a; }
360
361 template<typename _Tp>
362 inline bool
363 operator!=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
364 { return (bool)__a; }
365
366 template<typename _Tp1, typename _Tp2>
367 inline bool
368 operator<(const shared_ptr<_Tp1>& __a,
369 const shared_ptr<_Tp2>& __b) noexcept
370 {
371 typedef typename std::common_type<_Tp1*, _Tp2*>::type _CT;
372 return std::less<_CT>()(__a.get(), __b.get());
373 }
374
375 template<typename _Tp>
376 inline bool
377 operator<(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
378 { return std::less<_Tp*>()(__a.get(), nullptr); }
379
380 template<typename _Tp>
381 inline bool
382 operator<(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
383 { return std::less<_Tp*>()(nullptr, __a.get()); }
384
385 template<typename _Tp1, typename _Tp2>
386 inline bool
387 operator<=(const shared_ptr<_Tp1>& __a,
388 const shared_ptr<_Tp2>& __b) noexcept
389 { return !(__b < __a); }
390
391 template<typename _Tp>
392 inline bool
393 operator<=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
394 { return !(nullptr < __a); }
395
396 template<typename _Tp>
397 inline bool
398 operator<=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
399 { return !(__a < nullptr); }
400
401 template<typename _Tp1, typename _Tp2>
402 inline bool
403 operator>(const shared_ptr<_Tp1>& __a,
404 const shared_ptr<_Tp2>& __b) noexcept
405 { return (__b < __a); }
406
407 template<typename _Tp>
408 inline bool
409 operator>(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
410 { return std::less<_Tp*>()(nullptr, __a.get()); }
411
412 template<typename _Tp>
413 inline bool
414 operator>(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
415 { return std::less<_Tp*>()(__a.get(), nullptr); }
416
417 template<typename _Tp1, typename _Tp2>
418 inline bool
419 operator>=(const shared_ptr<_Tp1>& __a,
420 const shared_ptr<_Tp2>& __b) noexcept
421 { return !(__a < __b); }
422
423 template<typename _Tp>
424 inline bool
425 operator>=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
426 { return !(__a < nullptr); }
427
428 template<typename _Tp>
429 inline bool
430 operator>=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
431 { return !(nullptr < __a); }
432
433 template<typename _Tp>
434 struct less<shared_ptr<_Tp>> : public _Sp_less<shared_ptr<_Tp>>
435 { };
436
437 // 20.7.2.2.8 shared_ptr specialized algorithms.
438 template<typename _Tp>
439 inline void
440 swap(shared_ptr<_Tp>& __a, shared_ptr<_Tp>& __b) noexcept
441 { __a.swap(__b); }
442
443 // 20.7.2.2.9 shared_ptr casts.
444 template<typename _Tp, typename _Tp1>
445 inline shared_ptr<_Tp>
446 static_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
447 { return shared_ptr<_Tp>(__r, static_cast<_Tp*>(__r.get())); }
448
449 template<typename _Tp, typename _Tp1>
450 inline shared_ptr<_Tp>
451 const_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
452 { return shared_ptr<_Tp>(__r, const_cast<_Tp*>(__r.get())); }
453
454 template<typename _Tp, typename _Tp1>
455 inline shared_ptr<_Tp>
456 dynamic_pointer_cast(const shared_ptr<_Tp1>& __r) noexcept
457 {
458 if (_Tp* __p = dynamic_cast<_Tp*>(__r.get()))
459 return shared_ptr<_Tp>(__r, __p);
460 return shared_ptr<_Tp>();
461 }
462
463
464 /**
465 * @brief A smart pointer with weak semantics.
466 *
467 * With forwarding constructors and assignment operators.
468 */
469 template<typename _Tp>
470 class weak_ptr : public __weak_ptr<_Tp>
471 {
472 template<typename _Ptr>
473 using _Convertible
474 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
475
476 public:
477 constexpr weak_ptr() noexcept = default;
478
479 template<typename _Tp1, typename = _Convertible<_Tp1*>>
480 weak_ptr(const shared_ptr<_Tp1>& __r) noexcept
481 : __weak_ptr<_Tp>(__r) { }
482
483 weak_ptr(const weak_ptr&) noexcept = default;
484
485 template<typename _Tp1, typename = _Convertible<_Tp1*>>
486 weak_ptr(const weak_ptr<_Tp1>& __r) noexcept
487 : __weak_ptr<_Tp>(__r) { }
488
489 weak_ptr(weak_ptr&&) noexcept = default;
490
491 template<typename _Tp1, typename = _Convertible<_Tp1*>>
492 weak_ptr(weak_ptr<_Tp1>&& __r) noexcept
493 : __weak_ptr<_Tp>(std::move(__r)) { }
494
495 weak_ptr&
496 operator=(const weak_ptr& __r) noexcept = default;
497
498 template<typename _Tp1>
499 weak_ptr&
500 operator=(const weak_ptr<_Tp1>& __r) noexcept
501 {
502 this->__weak_ptr<_Tp>::operator=(__r);
503 return *this;
504 }
505
506 template<typename _Tp1>
507 weak_ptr&
508 operator=(const shared_ptr<_Tp1>& __r) noexcept
509 {
510 this->__weak_ptr<_Tp>::operator=(__r);
511 return *this;
512 }
513
514 weak_ptr&
515 operator=(weak_ptr&& __r) noexcept = default;
516
517 template<typename _Tp1>
518 weak_ptr&
519 operator=(weak_ptr<_Tp1>&& __r) noexcept
520 {
521 this->__weak_ptr<_Tp>::operator=(std::move(__r));
522 return *this;
523 }
524
525 shared_ptr<_Tp>
526 lock() const noexcept
527 { return shared_ptr<_Tp>(*this, std::nothrow); }
528 };
529
530 // 20.7.2.3.6 weak_ptr specialized algorithms.
531 template<typename _Tp>
532 inline void
533 swap(weak_ptr<_Tp>& __a, weak_ptr<_Tp>& __b) noexcept
534 { __a.swap(__b); }
535
536
537 /// Primary template owner_less
538 template<typename _Tp>
539 struct owner_less;
540
541 /// Partial specialization of owner_less for shared_ptr.
542 template<typename _Tp>
543 struct owner_less<shared_ptr<_Tp>>
544 : public _Sp_owner_less<shared_ptr<_Tp>, weak_ptr<_Tp>>
545 { };
546
547 /// Partial specialization of owner_less for weak_ptr.
548 template<typename _Tp>
549 struct owner_less<weak_ptr<_Tp>>
550 : public _Sp_owner_less<weak_ptr<_Tp>, shared_ptr<_Tp>>
551 { };
552
553 /**
554 * @brief Base class allowing use of member function shared_from_this.
555 */
556 template<typename _Tp>
557 class enable_shared_from_this
558 {
559 protected:
560 constexpr enable_shared_from_this() noexcept { }
561
562 enable_shared_from_this(const enable_shared_from_this&) noexcept { }
563
564 enable_shared_from_this&
565 operator=(const enable_shared_from_this&) noexcept
566 { return *this; }
567
568 ~enable_shared_from_this() { }
569
570 public:
571 shared_ptr<_Tp>
572 shared_from_this()
573 { return shared_ptr<_Tp>(this->_M_weak_this); }
574
575 shared_ptr<const _Tp>
576 shared_from_this() const
577 { return shared_ptr<const _Tp>(this->_M_weak_this); }
578
579 private:
580 template<typename _Tp1>
581 void
582 _M_weak_assign(_Tp1* __p, const __shared_count<>& __n) const noexcept
583 { _M_weak_this._M_assign(__p, __n); }
584
585 template<typename _Tp1, typename _Tp2>
586 friend void
587 __enable_shared_from_this_helper(const __shared_count<>&,
588 const enable_shared_from_this<_Tp1>*,
589 const _Tp2*) noexcept;
590
591 mutable weak_ptr<_Tp> _M_weak_this;
592 };
593
594 template<typename _Tp1, typename _Tp2>
595 inline void
596 __enable_shared_from_this_helper(const __shared_count<>& __pn,
597 const enable_shared_from_this<_Tp1>*
598 __pe, const _Tp2* __px) noexcept
599 {
600 if (__pe != nullptr)
601 __pe->_M_weak_assign(const_cast<_Tp2*>(__px), __pn);
602 }
603
604 /**
605 * @brief Create an object that is owned by a shared_ptr.
606 * @param __a An allocator.
607 * @param __args Arguments for the @a _Tp object's constructor.
608 * @return A shared_ptr that owns the newly created object.
609 * @throw An exception thrown from @a _Alloc::allocate or from the
610 * constructor of @a _Tp.
611 *
612 * A copy of @a __a will be used to allocate memory for the shared_ptr
613 * and the new object.
614 */
615 template<typename _Tp, typename _Alloc, typename... _Args>
616 inline shared_ptr<_Tp>
617 allocate_shared(const _Alloc& __a, _Args&&... __args)
618 {
619 return shared_ptr<_Tp>(_Sp_make_shared_tag(), __a,
15. Calling constructor for 'shared_ptr<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry>'
620 std::forward<_Args>(__args)...);
621 }
622
623 /**
624 * @brief Create an object that is owned by a shared_ptr.
625 * @param __args Arguments for the @a _Tp object's constructor.
626 * @return A shared_ptr that owns the newly created object.
627 * @throw std::bad_alloc, or an exception thrown from the
628 * constructor of @a _Tp.
629 */
630 template<typename _Tp, typename... _Args>
631 inline shared_ptr<_Tp>
632 make_shared(_Args&&... __args)
633 {
634 typedef typename std::remove_const<_Tp>::type _Tp_nc;
635 return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
14. Calling 'allocate_shared<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry>, llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>> &, llvm::PBQP::Matrix>'
636 std::forward<_Args>(__args)...);
637 }
638
639 /// std::hash specialization for shared_ptr.
640 template<typename _Tp>
641 struct hash<shared_ptr<_Tp>>
642 : public __hash_base<size_t, shared_ptr<_Tp>>
643 {
644 size_t
645 operator()(const shared_ptr<_Tp>& __s) const noexcept
646 { return std::hash<_Tp*>()(__s.get()); }
647 };
648
649 // @} group pointer_abstractions
650
651_GLIBCXX_END_NAMESPACE_VERSION
652} // namespace
653
654#endif // _SHARED_PTR_H
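
The PoolRef construction in CostAllocator.h combines two facilities defined in
this header: shared_from_this() and the aliasing constructor. A minimal sketch
of that combination, using only standard C++ (Entry is an invented name):

#include <cassert>
#include <memory>

struct Entry : std::enable_shared_from_this<Entry> {
  int Value = 42;
};

int main() {
  auto E = std::make_shared<Entry>();
  // Aliasing constructor: V shares Entry's control block (and lifetime) but
  // points at the Value sub-object, the same shape as ValuePool's PoolRef.
  std::shared_ptr<const int> V(E->shared_from_this(), &E->Value);
  assert(*V == 42 && E.use_count() == 2);
  return 0;
}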

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/shared_ptr_base.h

1// shared_ptr and weak_ptr implementation details -*- C++ -*-
2
3// Copyright (C) 2007-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file bits/shared_ptr_base.h
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_BASE_H
50#define _SHARED_PTR_BASE_H 1
51
52#include <typeinfo>
53#include <bits/allocated_ptr.h>
54#include <ext/aligned_buffer.h>
55
56namespace std _GLIBCXX_VISIBILITY(default)
57{
58_GLIBCXX_BEGIN_NAMESPACE_VERSION
59
60#if _GLIBCXX_USE_DEPRECATED
61 template<typename> class auto_ptr;
62#endif
63
64 /**
65 * @brief Exception possibly thrown by @c shared_ptr.
66 * @ingroup exceptions
67 */
68 class bad_weak_ptr : public std::exception
69 {
70 public:
71 virtual char const* what() const noexcept;
72
73 virtual ~bad_weak_ptr() noexcept;
74 };
75
76 // Substitute for bad_weak_ptr object in the case of -fno-exceptions.
77 inline void
78 __throw_bad_weak_ptr()
79  { _GLIBCXX_THROW_OR_ABORT(bad_weak_ptr()); }
80
81 using __gnu_cxx::_Lock_policy;
82 using __gnu_cxx::__default_lock_policy;
83 using __gnu_cxx::_S_single;
84 using __gnu_cxx::_S_mutex;
85 using __gnu_cxx::_S_atomic;
86
87 // Empty helper class except when the template argument is _S_mutex.
88 template<_Lock_policy _Lp>
89 class _Mutex_base
90 {
91 protected:
92 // The atomic policy uses fully-fenced builtins, single doesn't care.
93 enum { _S_need_barriers = 0 };
94 };
95
96 template<>
97 class _Mutex_base<_S_mutex>
98 : public __gnu_cxx::__mutex
99 {
100 protected:
101 // This policy is used when atomic builtins are not available.
102 // The replacement atomic operations might not have the necessary
103 // memory barriers.
104 enum { _S_need_barriers = 1 };
105 };
106
107 template<_Lock_policy _Lp = __default_lock_policy>
108 class _Sp_counted_base
109 : public _Mutex_base<_Lp>
110 {
111 public:
112 _Sp_counted_base() noexcept
113 : _M_use_count(1), _M_weak_count(1) { }
114
115 virtual
116 ~_Sp_counted_base() noexcept
117 { }
118
119 // Called when _M_use_count drops to zero, to release the resources
120 // managed by *this.
121 virtual void
122 _M_dispose() noexcept = 0;
123
124 // Called when _M_weak_count drops to zero.
125 virtual void
126 _M_destroy() noexcept
127 { delete this; }
128
129 virtual void*
130 _M_get_deleter(const std::type_info&) noexcept = 0;
131
132 void
133 _M_add_ref_copy()
134 { __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
135
136 void
137 _M_add_ref_lock();
138
139 bool
140 _M_add_ref_lock_nothrow();
141
142 void
143 _M_release() noexcept
144 {
145 // Be race-detector-friendly. For more info see bits/c++config.
146 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
147 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
148 {
149 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
150 _M_dispose();
151 // There must be a memory barrier between dispose() and destroy()
152 // to ensure that the effects of dispose() are observed in the
153 // thread that runs destroy().
154 // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
155 if (_Mutex_base<_Lp>::_S_need_barriers)
156 {
157             __atomic_thread_fence (__ATOMIC_ACQ_REL);
158 }
159
160 // Be race-detector-friendly. For more info see bits/c++config.
161 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
162 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count,
163 -1) == 1)
164 {
165 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
166 _M_destroy();
167 }
168 }
169 }
170
171 void
172 _M_weak_add_ref() noexcept
173 { __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
174
175 void
176 _M_weak_release() noexcept
177 {
178 // Be race-detector-friendly. For more info see bits/c++config.
179 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
180 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
181 {
182 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
183 if (_Mutex_base<_Lp>::_S_need_barriers)
184 {
185 // See _M_release(),
186 // destroy() must observe results of dispose()
187             __atomic_thread_fence (__ATOMIC_ACQ_REL);
188 }
189 _M_destroy();
190 }
191 }
192
193 long
194 _M_get_use_count() const noexcept
195 {
196 // No memory barrier is used here so there is no synchronization
197 // with other threads.
198        return __atomic_load_n(&_M_use_count, __ATOMIC_RELAXED);
199 }
200
201 private:
202 _Sp_counted_base(_Sp_counted_base const&) = delete;
203 _Sp_counted_base& operator=(_Sp_counted_base const&) = delete;
204
205 _Atomic_word _M_use_count; // #shared
206 _Atomic_word _M_weak_count; // #weak + (#shared != 0)
207 };
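  // Informal invariants of the two counters above: _M_use_count is the number
  // of shared_ptr owners, while _M_weak_count is the number of weak_ptr
  // owners plus one for the whole group of shared owners. _M_release() calls
  // _M_dispose() (destroy the payload) when the use count reaches zero, and
  // _M_destroy() (free this control block) once the weak count follows.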
208
209 template<>
210 inline void
211 _Sp_counted_base<_S_single>::
212 _M_add_ref_lock()
213 {
214 if (_M_use_count == 0)
215 __throw_bad_weak_ptr();
216 ++_M_use_count;
217 }
218
219 template<>
220 inline void
221 _Sp_counted_base<_S_mutex>::
222 _M_add_ref_lock()
223 {
224 __gnu_cxx::__scoped_lock sentry(*this);
225 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
226 {
227 _M_use_count = 0;
228 __throw_bad_weak_ptr();
229 }
230 }
231
232 template<>
233 inline void
234 _Sp_counted_base<_S_atomic>::
235 _M_add_ref_lock()
236 {
237 // Perform lock-free add-if-not-zero operation.
238 _Atomic_word __count = _M_get_use_count();
239 do
240 {
241 if (__count == 0)
242 __throw_bad_weak_ptr();
243 // Replace the current counter value with the old value + 1, as
244 // long as it's not changed meanwhile.
245 }
246 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
247                                          true, __ATOMIC_ACQ_REL,
248                                          __ATOMIC_RELAXED));
249 }
250
251 template<>
252 inline bool
253 _Sp_counted_base<_S_single>::
254 _M_add_ref_lock_nothrow()
255 {
256 if (_M_use_count == 0)
257 return false;
258 ++_M_use_count;
259 return true;
260 }
261
262 template<>
263 inline bool
264 _Sp_counted_base<_S_mutex>::
265 _M_add_ref_lock_nothrow()
266 {
267 __gnu_cxx::__scoped_lock sentry(*this);
268 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
269 {
270 _M_use_count = 0;
271 return false;
272 }
273 return true;
274 }
275
276 template<>
277 inline bool
278 _Sp_counted_base<_S_atomic>::
279 _M_add_ref_lock_nothrow()
280 {
281 // Perform lock-free add-if-not-zero operation.
282 _Atomic_word __count = _M_get_use_count();
283 do
284 {
285 if (__count == 0)
286 return false;
287 // Replace the current counter value with the old value + 1, as
288 // long as it's not changed meanwhile.
289 }
290 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
291                                          true, __ATOMIC_ACQ_REL,
292                                          __ATOMIC_RELAXED));
293 return true;
294 }
295
296 template<>
297 inline void
298 _Sp_counted_base<_S_single>::_M_add_ref_copy()
299 { ++_M_use_count; }
300
301 template<>
302 inline void
303 _Sp_counted_base<_S_single>::_M_release() noexcept
304 {
305 if (--_M_use_count == 0)
306 {
307 _M_dispose();
308 if (--_M_weak_count == 0)
309 _M_destroy();
310 }
311 }
312
313 template<>
314 inline void
315 _Sp_counted_base<_S_single>::_M_weak_add_ref() noexcept
316 { ++_M_weak_count; }
317
318 template<>
319 inline void
320 _Sp_counted_base<_S_single>::_M_weak_release() noexcept
321 {
322 if (--_M_weak_count == 0)
323 _M_destroy();
324 }
325
326 template<>
327 inline long
328 _Sp_counted_base<_S_single>::_M_get_use_count() const noexcept
329 { return _M_use_count; }
330
331
332 // Forward declarations.
333 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
334 class __shared_ptr;
335
336 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
337 class __weak_ptr;
338
339 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
340 class __enable_shared_from_this;
341
342 template<typename _Tp>
343 class shared_ptr;
344
345 template<typename _Tp>
346 class weak_ptr;
347
348 template<typename _Tp>
349 struct owner_less;
350
351 template<typename _Tp>
352 class enable_shared_from_this;
353
354 template<_Lock_policy _Lp = __default_lock_policy>
355 class __weak_count;
356
357 template<_Lock_policy _Lp = __default_lock_policy>
358 class __shared_count;
359
360
361 // Counted ptr with no deleter or allocator support
362 template<typename _Ptr, _Lock_policy _Lp>
363 class _Sp_counted_ptr final : public _Sp_counted_base<_Lp>
364 {
365 public:
366 explicit
367 _Sp_counted_ptr(_Ptr __p) noexcept
368 : _M_ptr(__p) { }
369
370 virtual void
371 _M_dispose() noexcept
372 { delete _M_ptr; }
373
374 virtual void
375 _M_destroy() noexcept
376 { delete this; }
377
378 virtual void*
379 _M_get_deleter(const std::type_info&) noexcept
380 { return nullptr; }
381
382 _Sp_counted_ptr(const _Sp_counted_ptr&) = delete;
383 _Sp_counted_ptr& operator=(const _Sp_counted_ptr&) = delete;
384
385 private:
386 _Ptr _M_ptr;
387 };
388
389 template<>
390 inline void
391 _Sp_counted_ptr<nullptr_t, _S_single>::_M_dispose() noexcept { }
392
393 template<>
394 inline void
395 _Sp_counted_ptr<nullptr_t, _S_mutex>::_M_dispose() noexcept { }
396
397 template<>
398 inline void
399 _Sp_counted_ptr<nullptr_t, _S_atomic>::_M_dispose() noexcept { }
400
401 template<int _Nm, typename _Tp,
402 bool __use_ebo = !__is_final(_Tp) && __is_empty(_Tp)>
403 struct _Sp_ebo_helper;
404
405 /// Specialization using EBO.
406 template<int _Nm, typename _Tp>
407 struct _Sp_ebo_helper<_Nm, _Tp, true> : private _Tp
408 {
409 explicit _Sp_ebo_helper(const _Tp& __tp) : _Tp(__tp) { }
410
411 static _Tp&
412 _S_get(_Sp_ebo_helper& __eboh) { return static_cast<_Tp&>(__eboh); }
413 };
414
415 /// Specialization not using EBO.
416 template<int _Nm, typename _Tp>
417 struct _Sp_ebo_helper<_Nm, _Tp, false>
418 {
419 explicit _Sp_ebo_helper(const _Tp& __tp) : _M_tp(__tp) { }
420
421 static _Tp&
422 _S_get(_Sp_ebo_helper& __eboh)
423 { return __eboh._M_tp; }
424
425 private:
426 _Tp _M_tp;
427 };
428
429 // Support for custom deleter and/or allocator
430 template<typename _Ptr, typename _Deleter, typename _Alloc, _Lock_policy _Lp>
431 class _Sp_counted_deleter final : public _Sp_counted_base<_Lp>
432 {
433 class _Impl : _Sp_ebo_helper<0, _Deleter>, _Sp_ebo_helper<1, _Alloc>
434 {
435 typedef _Sp_ebo_helper<0, _Deleter> _Del_base;
436 typedef _Sp_ebo_helper<1, _Alloc> _Alloc_base;
437
438 public:
439 _Impl(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
440 : _M_ptr(__p), _Del_base(__d), _Alloc_base(__a)
441 { }
442
443 _Deleter& _M_del() noexcept { return _Del_base::_S_get(*this); }
444 _Alloc& _M_alloc() noexcept { return _Alloc_base::_S_get(*this); }
445
446 _Ptr _M_ptr;
447 };
448
449 public:
450 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_deleter>;
451
452 // __d(__p) must not throw.
453 _Sp_counted_deleter(_Ptr __p, _Deleter __d) noexcept
454 : _M_impl(__p, __d, _Alloc()) { }
455
456 // __d(__p) must not throw.
457 _Sp_counted_deleter(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
458 : _M_impl(__p, __d, __a) { }
459
460 ~_Sp_counted_deleter() noexcept { }
461
462 virtual void
463 _M_dispose() noexcept
464 { _M_impl._M_del()(_M_impl._M_ptr); }
465
466 virtual void
467 _M_destroy() noexcept
468 {
469 __allocator_type __a(_M_impl._M_alloc());
470 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
471 this->~_Sp_counted_deleter();
472 }
473
474 virtual void*
475 _M_get_deleter(const std::type_info& __ti) noexcept
476 {
477#if __cpp_rtti
478 // _GLIBCXX_RESOLVE_LIB_DEFECTS
479 // 2400. shared_ptr's get_deleter() should use addressof()
480 return __ti == typeid(_Deleter)
481 ? std::__addressof(_M_impl._M_del())
482 : nullptr;
483#else
484 return nullptr;
485#endif
486 }
487
488 private:
489 _Impl _M_impl;
490 };
491
492 // helpers for make_shared / allocate_shared
493
494 struct _Sp_make_shared_tag { };
495
496 template<typename _Tp, typename _Alloc, _Lock_policy _Lp>
497 class _Sp_counted_ptr_inplace final : public _Sp_counted_base<_Lp>
498 {
499 class _Impl : _Sp_ebo_helper<0, _Alloc>
500 {
501 typedef _Sp_ebo_helper<0, _Alloc> _A_base;
502
503 public:
504 explicit _Impl(_Alloc __a) noexcept : _A_base(__a) { }
505
506 _Alloc& _M_alloc() noexcept { return _A_base::_S_get(*this); }
507
508 __gnu_cxx::__aligned_buffer<_Tp> _M_storage;
509 };
510
511 public:
512 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_ptr_inplace>;
513
514 template<typename... _Args>
515 _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
516 : _M_impl(__a)
517 {
518 // _GLIBCXX_RESOLVE_LIB_DEFECTS
519 // 2070. allocate_shared should use allocator_traits<A>::construct
520 allocator_traits<_Alloc>::construct(__a, _M_ptr(),
19. Calling 'allocator_traits::construct'
521 std::forward<_Args>(__args)...); // might throw
522 }
523
524 ~_Sp_counted_ptr_inplace() noexcept { }
525
526 virtual void
527 _M_dispose() noexcept
528 {
529 allocator_traits<_Alloc>::destroy(_M_impl._M_alloc(), _M_ptr());
530 }
531
532 // Override because the allocator needs to know the dynamic type
533 virtual void
534 _M_destroy() noexcept
535 {
536 __allocator_type __a(_M_impl._M_alloc());
537 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
538 this->~_Sp_counted_ptr_inplace();
539 }
540
541 // Sneaky trick so __shared_ptr can get the managed pointer
542 virtual void*
543 _M_get_deleter(const std::type_info& __ti) noexcept
544 {
545#if __cpp_rtti
546 if (__ti == typeid(_Sp_make_shared_tag))
547 return const_cast<typename remove_cv<_Tp>::type*>(_M_ptr());
548#endif
549 return nullptr;
550 }
551
552 private:
553 _Tp* _M_ptr() noexcept { return _M_impl._M_storage._M_ptr(); }
554
555 _Impl _M_impl;
556 };
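  // Note: make_shared/allocate_shared route through this class, so the
  // control block and the _Tp payload (_M_impl._M_storage, an
  // __aligned_buffer<_Tp>) share a single allocation. The payload is then
  // constructed in place via allocator_traits<_Alloc>::construct, which is
  // the call the analyzer trace enters at step 19 above.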
557
558
559 template<_Lock_policy _Lp>
560 class __shared_count
561 {
562 public:
563 constexpr __shared_count() noexcept : _M_pi(0)
564 { }
565
566 template<typename _Ptr>
567 explicit
568 __shared_count(_Ptr __p) : _M_pi(0)
569 {
570       __try
571 {
572 _M_pi = new _Sp_counted_ptr<_Ptr, _Lp>(__p);
573 }
574       __catch(...)
575 {
576 delete __p;
577 __throw_exception_again;
578 }
579 }
580
581 template<typename _Ptr, typename _Deleter>
582 __shared_count(_Ptr __p, _Deleter __d)
583 : __shared_count(__p, std::move(__d), allocator<void>())
584 { }
585
586 template<typename _Ptr, typename _Deleter, typename _Alloc>
587 __shared_count(_Ptr __p, _Deleter __d, _Alloc __a) : _M_pi(0)
588 {
589 typedef _Sp_counted_deleter<_Ptr, _Deleter, _Alloc, _Lp> _Sp_cd_type;
590       __try
591 {
592 typename _Sp_cd_type::__allocator_type __a2(__a);
593 auto __guard = std::__allocate_guarded(__a2);
594 _Sp_cd_type* __mem = __guard.get();
595 ::new (__mem) _Sp_cd_type(__p, std::move(__d), std::move(__a));
596 _M_pi = __mem;
597 __guard = nullptr;
598 }
599       __catch(...)
600 {
601 __d(__p); // Call _Deleter on __p.
602 __throw_exception_again;
603 }
604 }
605
606 template<typename _Tp, typename _Alloc, typename... _Args>
607 __shared_count(_Sp_make_shared_tag, _Tp*, const _Alloc& __a,
608 _Args&&... __args)
609 : _M_pi(0)
610 {
611 typedef _Sp_counted_ptr_inplace<_Tp, _Alloc, _Lp> _Sp_cp_type;
612 typename _Sp_cp_type::__allocator_type __a2(__a);
613 auto __guard = std::__allocate_guarded(__a2);
614 _Sp_cp_type* __mem = __guard.get();
615 ::new (__mem) _Sp_cp_type(std::move(__a),
18. Calling constructor for '_Sp_counted_ptr_inplace<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry, std::allocator<llvm::PBQP::ValuePool<llvm::PBQP::MDMatrix<llvm::PBQP::RegAlloc::MatrixMetadata>>::PoolEntry>, __gnu_cxx::_S_atomic>'
616 std::forward<_Args>(__args)...);
617 _M_pi = __mem;
618 __guard = nullptr;
619 }
620
621#if _GLIBCXX_USE_DEPRECATED
622 // Special case for auto_ptr<_Tp> to provide the strong guarantee.
623 template<typename _Tp>
624 explicit
625 __shared_count(std::auto_ptr<_Tp>&& __r);
626#endif
627
628 // Special case for unique_ptr<_Tp,_Del> to provide the strong guarantee.
629 template<typename _Tp, typename _Del>
630 explicit
631 __shared_count(std::unique_ptr<_Tp, _Del>&& __r) : _M_pi(0)
632 {
633 // _GLIBCXX_RESOLVE_LIB_DEFECTS
634 // 2415. Inconsistency between unique_ptr and shared_ptr
635 if (__r.get() == nullptr)
636 return;
637
638 using _Ptr = typename unique_ptr<_Tp, _Del>::pointer;
639 using _Del2 = typename conditional<is_reference<_Del>::value,
640 reference_wrapper<typename remove_reference<_Del>::type>,
641 _Del>::type;
642 using _Sp_cd_type
643 = _Sp_counted_deleter<_Ptr, _Del2, allocator<void>, _Lp>;
644 using _Alloc = allocator<_Sp_cd_type>;
645 using _Alloc_traits = allocator_traits<_Alloc>;
646 _Alloc __a;
647 _Sp_cd_type* __mem = _Alloc_traits::allocate(__a, 1);
648 _Alloc_traits::construct(__a, __mem, __r.release(),
649 __r.get_deleter()); // non-throwing
650 _M_pi = __mem;
651 }
652
653 // Throw bad_weak_ptr when __r._M_get_use_count() == 0.
654 explicit __shared_count(const __weak_count<_Lp>& __r);
655
656 // Does not throw if __r._M_get_use_count() == 0, caller must check.
657 explicit __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t);
658
659 ~__shared_count() noexcept
660 {
661 if (_M_pi != nullptr)
662 _M_pi->_M_release();
663 }
664
665 __shared_count(const __shared_count& __r) noexcept
666 : _M_pi(__r._M_pi)
667 {
668 if (_M_pi != 0)
669 _M_pi->_M_add_ref_copy();
670 }
671
672 __shared_count&
673 operator=(const __shared_count& __r) noexcept
674 {
675 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
676 if (__tmp != _M_pi)
677 {
678 if (__tmp != 0)
679 __tmp->_M_add_ref_copy();
680 if (_M_pi != 0)
681 _M_pi->_M_release();
682 _M_pi = __tmp;
683 }
684 return *this;
685 }
686
687 void
688 _M_swap(__shared_count& __r) noexcept
689 {
690 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
691 __r._M_pi = _M_pi;
692 _M_pi = __tmp;
693 }
694
695 long
696 _M_get_use_count() const noexcept
697 { return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
698
699 bool
700 _M_unique() const noexcept
701 { return this->_M_get_use_count() == 1; }
702
703 void*
704 _M_get_deleter(const std::type_info& __ti) const noexcept
705 { return _M_pi ? _M_pi->_M_get_deleter(__ti) : nullptr; }
706
707 bool
708 _M_less(const __shared_count& __rhs) const noexcept
709 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
710
711 bool
712 _M_less(const __weak_count<_Lp>& __rhs) const noexcept
713 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
714
715 // Friend function injected into enclosing namespace and found by ADL
716 friend inline bool
717 operator==(const __shared_count& __a, const __shared_count& __b) noexcept
718 { return __a._M_pi == __b._M_pi; }
719
720 private:
721 friend class __weak_count<_Lp>;
722
723 _Sp_counted_base<_Lp>* _M_pi;
724 };
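
The copy constructor and copy assignment above are where the shared reference count is actually maintained (_M_add_ref_copy on copy, _M_release on replacement and destruction). A minimal sketch of that behavior, observed through the public std::shared_ptr interface that wraps this class:

    // Sketch only: observing __shared_count's add-ref/release behavior
    // through the public std::shared_ptr API.
    #include <cassert>
    #include <memory>

    int main() {
      std::shared_ptr<int> a = std::make_shared<int>(42);
      assert(a.use_count() == 1);  // one __shared_count owns the object

      std::shared_ptr<int> b = a;  // copy: _M_add_ref_copy()
      assert(a.use_count() == 2);

      b.reset();                   // release: the count drops back to 1
      assert(a.use_count() == 1);
      return 0;                    // the last release destroys the int
    }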
725
726
727 template<_Lock_policy _Lp>
728 class __weak_count
729 {
730 public:
731 constexpr __weak_count() noexcept : _M_pi(nullptr)
732 { }
733
734 __weak_count(const __shared_count<_Lp>& __r) noexcept
735 : _M_pi(__r._M_pi)
736 {
737 if (_M_pi != nullptr)
738 _M_pi->_M_weak_add_ref();
739 }
740
741 __weak_count(const __weak_count& __r) noexcept
742 : _M_pi(__r._M_pi)
743 {
744 if (_M_pi != nullptr)
745 _M_pi->_M_weak_add_ref();
746 }
747
748 __weak_count(__weak_count&& __r) noexcept
749 : _M_pi(__r._M_pi)
750 { __r._M_pi = nullptr; }
751
752 ~__weak_count() noexcept
753 {
754 if (_M_pi != nullptr)
755 _M_pi->_M_weak_release();
756 }
757
758 __weak_count&
759 operator=(const __shared_count<_Lp>& __r) noexcept
760 {
761 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
762 if (__tmp != nullptr)
763 __tmp->_M_weak_add_ref();
764 if (_M_pi != nullptr)
765 _M_pi->_M_weak_release();
766 _M_pi = __tmp;
767 return *this;
768 }
769
770 __weak_count&
771 operator=(const __weak_count& __r) noexcept
772 {
773 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
774 if (__tmp != nullptr)
775 __tmp->_M_weak_add_ref();
776 if (_M_pi != nullptr)
777 _M_pi->_M_weak_release();
778 _M_pi = __tmp;
779 return *this;
780 }
781
782 __weak_count&
783 operator=(__weak_count&& __r) noexcept
784 {
785 if (_M_pi != nullptr)
786 _M_pi->_M_weak_release();
787 _M_pi = __r._M_pi;
788 __r._M_pi = nullptr;
789 return *this;
790 }
791
792 void
793 _M_swap(__weak_count& __r) noexcept
794 {
795 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
796 __r._M_pi = _M_pi;
797 _M_pi = __tmp;
798 }
799
800 long
801 _M_get_use_count() const noexcept
802 { return _M_pi != nullptr ? _M_pi->_M_get_use_count() : 0; }
803
804 bool
805 _M_less(const __weak_count& __rhs) const noexcept
806 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
807
808 bool
809 _M_less(const __shared_count<_Lp>& __rhs) const noexcept
810 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
811
812 // Friend function injected into enclosing namespace and found by ADL
813 friend inline bool
814 operator==(const __weak_count& __a, const __weak_count& __b) noexcept
815 { return __a._M_pi == __b._M_pi; }
816
817 private:
818 friend class __shared_count<_Lp>;
819
820 _Sp_counted_base<_Lp>* _M_pi;
821 };
822
823 // Now that __weak_count is defined we can define this constructor:
824 template<_Lock_policy _Lp>
825 inline
826 __shared_count<_Lp>::__shared_count(const __weak_count<_Lp>& __r)
827 : _M_pi(__r._M_pi)
828 {
829 if (_M_pi != nullptr)
830 _M_pi->_M_add_ref_lock();
831 else
832 __throw_bad_weak_ptr();
833 }
834
835 // Now that __weak_count is defined we can define this constructor:
836 template<_Lock_policy _Lp>
837 inline
838 __shared_count<_Lp>::
839 __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t)
840 : _M_pi(__r._M_pi)
841 {
842 if (_M_pi != nullptr)
843 if (!_M_pi->_M_add_ref_lock_nothrow())
844 _M_pi = nullptr;
845 }
846
847 // Support for enable_shared_from_this.
848
849 // Friend of __enable_shared_from_this.
850 template<_Lock_policy _Lp, typename _Tp1, typename _Tp2>
851 void
852 __enable_shared_from_this_helper(const __shared_count<_Lp>&,
853 const __enable_shared_from_this<_Tp1,
854 _Lp>*, const _Tp2*) noexcept;
855
856 // Friend of enable_shared_from_this.
857 template<typename _Tp1, typename _Tp2>
858 void
859 __enable_shared_from_this_helper(const __shared_count<>&,
860 const enable_shared_from_this<_Tp1>*,
861 const _Tp2*) noexcept;
862
863 template<_Lock_policy _Lp>
864 inline void
865 __enable_shared_from_this_helper(const __shared_count<_Lp>&, ...) noexcept
866 { }
867
868
869 template<typename _Tp, _Lock_policy _Lp>
870 class __shared_ptr
871 {
872 template<typename _Ptr>
873 using _Convertible
874 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
875
876 public:
877 typedef _Tp element_type;
878
879 constexpr __shared_ptr() noexcept
880 : _M_ptr(0), _M_refcount()
881 { }
882
883 template<typename _Tp1>
884 explicit __shared_ptr(_Tp1* __p)
885 : _M_ptr(__p), _M_refcount(__p)
886 {
887 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
888 static_assert( !is_void<_Tp1>::value, "incomplete type" );
889 static_assert( sizeof(_Tp1) > 0, "incomplete type" );
890 __enable_shared_from_this_helper(_M_refcount, __p, __p);
891 }
892
893 template<typename _Tp1, typename _Deleter>
894 __shared_ptr(_Tp1* __p, _Deleter __d)
895 : _M_ptr(__p), _M_refcount(__p, __d)
896 {
897 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
898 // TODO requires _Deleter CopyConstructible and __d(__p) well-formed
899 __enable_shared_from_this_helper(_M_refcount, __p, __p);
900 }
901
902 template<typename _Tp1, typename _Deleter, typename _Alloc>
903 __shared_ptr(_Tp1* __p, _Deleter __d, _Alloc __a)
904 : _M_ptr(__p), _M_refcount(__p, __d, std::move(__a))
905 {
906 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
907 // TODO requires _Deleter CopyConstructible and __d(__p) well-formed
908 __enable_shared_from_this_helper(_M_refcount, __p, __p);
909 }
910
911 template<typename _Deleter>
912 __shared_ptr(nullptr_t __p, _Deleter __d)
913 : _M_ptr(0), _M_refcount(__p, __d)
914 { }
915
916 template<typename _Deleter, typename _Alloc>
917 __shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
918 : _M_ptr(0), _M_refcount(__p, __d, std::move(__a))
919 { }
920
921 template<typename _Tp1>
922 __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, _Tp* __p) noexcept
923 : _M_ptr(__p), _M_refcount(__r._M_refcount) // never throws
924 { }
925
926 __shared_ptr(const __shared_ptr&) noexcept = default;
927 __shared_ptr& operator=(const __shared_ptr&) noexcept = default;
928 ~__shared_ptr() = default;
929
930 template<typename _Tp1, typename = _Convertible<_Tp1*>>
931 __shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
932 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
933 { }
934
935 __shared_ptr(__shared_ptr&& __r) noexcept
936 : _M_ptr(__r._M_ptr), _M_refcount()
937 {
938 _M_refcount._M_swap(__r._M_refcount);
939 __r._M_ptr = 0;
940 }
941
942 template<typename _Tp1, typename = _Convertible<_Tp1*>>
943 __shared_ptr(__shared_ptr<_Tp1, _Lp>&& __r) noexcept
944 : _M_ptr(__r._M_ptr), _M_refcount()
945 {
946 _M_refcount._M_swap(__r._M_refcount);
947 __r._M_ptr = 0;
948 }
949
950 template<typename _Tp1>
951 explicit __shared_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
952 : _M_refcount(__r._M_refcount) // may throw
953 {
954 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
955
956 // It is now safe to copy __r._M_ptr, as
957 // _M_refcount(__r._M_refcount) did not throw.
958 _M_ptr = __r._M_ptr;
959 }
960
961 // If an exception is thrown this constructor has no effect.
962 template<typename _Tp1, typename _Del, typename
963 = _Convertible<typename unique_ptr<_Tp1, _Del>::pointer>>
964 __shared_ptr(std::unique_ptr<_Tp1, _Del>&& __r)
965 : _M_ptr(__r.get()), _M_refcount()
966 {
967 __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
968 auto __raw = _S_raw_ptr(__r.get());
969 _M_refcount = __shared_count<_Lp>(std::move(__r));
970 __enable_shared_from_this_helper(_M_refcount, __raw, __raw);
971 }
972
973#if _GLIBCXX_USE_DEPRECATED
974 // Postcondition: use_count() == 1 and __r.get() == 0
975 template<typename _Tp1>
976 __shared_ptr(std::auto_ptr<_Tp1>&& __r);
977#endif
978
979 constexpr __shared_ptr(nullptr_t) noexcept : __shared_ptr() { }
980
981 template<typename _Tp1>
982 __shared_ptr&
983 operator=(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
984 {
985 _M_ptr = __r._M_ptr;
986 _M_refcount = __r._M_refcount; // __shared_count::op= doesn't throw
987 return *this;
988 }
989
990#if _GLIBCXX_USE_DEPRECATED
991 template<typename _Tp1>
992 __shared_ptr&
993 operator=(std::auto_ptr<_Tp1>&& __r)
994 {
995 __shared_ptr(std::move(__r)).swap(*this);
996 return *this;
997 }
998#endif
999
1000 __shared_ptr&
1001 operator=(__shared_ptr&& __r) noexcept
1002 {
1003 __shared_ptr(std::move(__r)).swap(*this);
1004 return *this;
1005 }
1006
1007 template<class _Tp1>
1008 __shared_ptr&
1009 operator=(__shared_ptr<_Tp1, _Lp>&& __r) noexcept
1010 {
1011 __shared_ptr(std::move(__r)).swap(*this);
1012 return *this;
1013 }
1014
1015 template<typename _Tp1, typename _Del>
1016 __shared_ptr&
1017 operator=(std::unique_ptr<_Tp1, _Del>&& __r)
1018 {
1019 __shared_ptr(std::move(__r)).swap(*this);
1020 return *this;
1021 }
1022
1023 void
1024 reset() noexcept
1025 { __shared_ptr().swap(*this); }
1026
1027 template<typename _Tp1>
1028 void
1029 reset(_Tp1* __p) // _Tp1 must be complete.
1030 {
1031 // Catch self-reset errors.
1032 __glibcxx_assert(__p == 0 || __p != _M_ptr);
1033 __shared_ptr(__p).swap(*this);
1034 }
1035
1036 template<typename _Tp1, typename _Deleter>
1037 void
1038 reset(_Tp1* __p, _Deleter __d)
1039 { __shared_ptr(__p, __d).swap(*this); }
1040
1041 template<typename _Tp1, typename _Deleter, typename _Alloc>
1042 void
1043 reset(_Tp1* __p, _Deleter __d, _Alloc __a)
1044 { __shared_ptr(__p, __d, std::move(__a)).swap(*this); }
1045
1046 // Allow class instantiation when _Tp is [cv-qual] void.
1047 typename std::add_lvalue_reference<_Tp>::type
1048 operator*() const noexcept
1049 {
1050 __glibcxx_assert(_M_ptr != 0);
1051 return *_M_ptr;
1052 }
1053
1054 _Tp*
1055 operator->() const noexcept
1056 {
1057 _GLIBCXX_DEBUG_PEDASSERT(_M_ptr != 0);
1058 return _M_ptr;
1059 }
1060
1061 _Tp*
1062 get() const noexcept
1063 { return _M_ptr; }
1064
1065 explicit operator bool() const // never throws
1066 { return _M_ptr == 0 ? false : true; }
1067
1068 bool
1069 unique() const noexcept
1070 { return _M_refcount._M_unique(); }
1071
1072 long
1073 use_count() const noexcept
1074 { return _M_refcount._M_get_use_count(); }
1075
1076 void
1077 swap(__shared_ptr<_Tp, _Lp>& __other) noexcept
1078 {
1079 std::swap(_M_ptr, __other._M_ptr);
1080 _M_refcount._M_swap(__other._M_refcount);
1081 }
1082
1083 template<typename _Tp1>
1084 bool
1085 owner_before(__shared_ptr<_Tp1, _Lp> const& __rhs) const
1086 { return _M_refcount._M_less(__rhs._M_refcount); }
1087
1088 template<typename _Tp1>
1089 bool
1090 owner_before(__weak_ptr<_Tp1, _Lp> const& __rhs) const
1091 { return _M_refcount._M_less(__rhs._M_refcount); }
1092
1093#if __cpp_rtti
1094 protected:
1095 // This constructor is non-standard, it is used by allocate_shared.
1096 template<typename _Alloc, typename... _Args>
1097 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1098 _Args&&... __args)
1099 : _M_ptr(), _M_refcount(__tag, (_Tp*)0, __a,
17. Calling constructor for '__shared_count<__gnu_cxx::_S_atomic>'
1100 std::forward<_Args>(__args)...)
1101 {
1102 // _M_ptr needs to point to the newly constructed object.
1103 // This relies on _Sp_counted_ptr_inplace::_M_get_deleter.
1104 void* __p = _M_refcount._M_get_deleter(typeid(__tag));
1105 _M_ptr = static_cast<_Tp*>(__p);
1106 __enable_shared_from_this_helper(_M_refcount, _M_ptr, _M_ptr);
1107 }
1108#else
1109 template<typename _Alloc>
1110 struct _Deleter
1111 {
1112 void operator()(typename _Alloc::value_type* __ptr)
1113 {
1114 __allocated_ptr<_Alloc> __guard{ _M_alloc, __ptr };
1115 allocator_traits<_Alloc>::destroy(_M_alloc, __guard.get());
1116 }
1117 _Alloc _M_alloc;
1118 };
1119
1120 template<typename _Alloc, typename... _Args>
1121 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1122 _Args&&... __args)
1123 : _M_ptr(), _M_refcount()
1124 {
1125 typedef typename allocator_traits<_Alloc>::template
1126 rebind_traits<typename std::remove_cv<_Tp>::type> __traits;
1127 _Deleter<typename __traits::allocator_type> __del = { __a };
1128 auto __guard = std::__allocate_guarded(__del._M_alloc);
1129 auto __ptr = __guard.get();
1130 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1131 // 2070. allocate_shared should use allocator_traits<A>::construct
1132 __traits::construct(__del._M_alloc, __ptr,
1133 std::forward<_Args>(__args)...);
1134 __guard = nullptr;
1135 __shared_count<_Lp> __count(__ptr, __del, __del._M_alloc);
1136 _M_refcount._M_swap(__count);
1137 _M_ptr = __ptr;
1138 __enable_shared_from_this_helper(_M_refcount, _M_ptr, _M_ptr);
1139 }
1140#endif
1141
1142 template<typename _Tp1, _Lock_policy _Lp1, typename _Alloc,
1143 typename... _Args>
1144 friend __shared_ptr<_Tp1, _Lp1>
1145 __allocate_shared(const _Alloc& __a, _Args&&... __args);
1146
1147 // This constructor is used by __weak_ptr::lock() and
1148 // shared_ptr::shared_ptr(const weak_ptr&, std::nothrow_t).
1149 __shared_ptr(const __weak_ptr<_Tp, _Lp>& __r, std::nothrow_t)
1150 : _M_refcount(__r._M_refcount, std::nothrow)
1151 {
1152 _M_ptr = _M_refcount._M_get_use_count() ? __r._M_ptr : nullptr;
1153 }
1154
1155 friend class __weak_ptr<_Tp, _Lp>;
1156
1157 private:
1158 void*
1159 _M_get_deleter(const std::type_info& __ti) const noexcept
1160 { return _M_refcount._M_get_deleter(__ti); }
1161
1162 template<typename _Tp1>
1163 static _Tp1*
1164 _S_raw_ptr(_Tp1* __ptr)
1165 { return __ptr; }
1166
1167 template<typename _Tp1>
1168 static auto
1169 _S_raw_ptr(_Tp1 __ptr) -> decltype(std::__addressof(*__ptr))
1170 { return std::__addressof(*__ptr); }
1171
1172 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1173 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1174
1175 template<typename _Del, typename _Tp1, _Lock_policy _Lp1>
1176 friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&) noexcept;
1177
1178 _Tp* _M_ptr; // Contained pointer.
1179 __shared_count<_Lp> _M_refcount; // Reference counter.
1180 };
1181
1182
1183 // 20.7.2.2.7 shared_ptr comparisons
1184 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1185 inline bool
1186 operator==(const __shared_ptr<_Tp1, _Lp>& __a,
1187 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1188 { return __a.get() == __b.get(); }
1189
1190 template<typename _Tp, _Lock_policy _Lp>
1191 inline bool
1192 operator==(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1193 { return !__a; }
1194
1195 template<typename _Tp, _Lock_policy _Lp>
1196 inline bool
1197 operator==(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1198 { return !__a; }
1199
1200 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1201 inline bool
1202 operator!=(const __shared_ptr<_Tp1, _Lp>& __a,
1203 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1204 { return __a.get() != __b.get(); }
1205
1206 template<typename _Tp, _Lock_policy _Lp>
1207 inline bool
1208 operator!=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1209 { return (bool)__a; }
1210
1211 template<typename _Tp, _Lock_policy _Lp>
1212 inline bool
1213 operator!=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1214 { return (bool)__a; }
1215
1216 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1217 inline bool
1218 operator<(const __shared_ptr<_Tp1, _Lp>& __a,
1219 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1220 {
1221 typedef typename std::common_type<_Tp1*, _Tp2*>::type _CT;
1222 return std::less<_CT>()(__a.get(), __b.get());
1223 }
1224
1225 template<typename _Tp, _Lock_policy _Lp>
1226 inline bool
1227 operator<(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1228 { return std::less<_Tp*>()(__a.get(), nullptr); }
1229
1230 template<typename _Tp, _Lock_policy _Lp>
1231 inline bool
1232 operator<(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1233 { return std::less<_Tp*>()(nullptr, __a.get()); }
1234
1235 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1236 inline bool
1237 operator<=(const __shared_ptr<_Tp1, _Lp>& __a,
1238 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1239 { return !(__b < __a); }
1240
1241 template<typename _Tp, _Lock_policy _Lp>
1242 inline bool
1243 operator<=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1244 { return !(nullptr < __a); }
1245
1246 template<typename _Tp, _Lock_policy _Lp>
1247 inline bool
1248 operator<=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1249 { return !(__a < nullptr); }
1250
1251 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1252 inline bool
1253 operator>(const __shared_ptr<_Tp1, _Lp>& __a,
1254 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1255 { return (__b < __a); }
1256
1257 template<typename _Tp, _Lock_policy _Lp>
1258 inline bool
1259 operator>(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1260 { return std::less<_Tp*>()(nullptr, __a.get()); }
1261
1262 template<typename _Tp, _Lock_policy _Lp>
1263 inline bool
1264 operator>(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1265 { return std::less<_Tp*>()(__a.get(), nullptr); }
1266
1267 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1268 inline bool
1269 operator>=(const __shared_ptr<_Tp1, _Lp>& __a,
1270 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1271 { return !(__a < __b); }
1272
1273 template<typename _Tp, _Lock_policy _Lp>
1274 inline bool
1275 operator>=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1276 { return !(__a < nullptr); }
1277
1278 template<typename _Tp, _Lock_policy _Lp>
1279 inline bool
1280 operator>=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1281 { return !(nullptr < __a); }
1282
1283 template<typename _Sp>
1284 struct _Sp_less : public binary_function<_Sp, _Sp, bool>
1285 {
1286 bool
1287 operator()(const _Sp& __lhs, const _Sp& __rhs) const noexcept
1288 {
1289 typedef typename _Sp::element_type element_type;
1290 return std::less<element_type*>()(__lhs.get(), __rhs.get());
1291 }
1292 };
1293
1294 template<typename _Tp, _Lock_policy _Lp>
1295 struct less<__shared_ptr<_Tp, _Lp>>
1296 : public _Sp_less<__shared_ptr<_Tp, _Lp>>
1297 { };
1298
1299 // 20.7.2.2.8 shared_ptr specialized algorithms.
1300 template<typename _Tp, _Lock_policy _Lp>
1301 inline void
1302 swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b) noexcept
1303 { __a.swap(__b); }
1304
1305 // 20.7.2.2.9 shared_ptr casts
1306
1307 // The seemingly equivalent code:
1308 // shared_ptr<_Tp, _Lp>(static_cast<_Tp*>(__r.get()))
1309 // will eventually result in undefined behaviour, attempting to
1310 // delete the same object twice.
1311 /// static_pointer_cast
1312 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1313 inline __shared_ptr<_Tp, _Lp>
1314 static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1315 { return __shared_ptr<_Tp, _Lp>(__r, static_cast<_Tp*>(__r.get())); }
1316
1317 // The seemingly equivalent code:
1318 // shared_ptr<_Tp, _Lp>(const_cast<_Tp*>(__r.get()))
1319 // will eventually result in undefined behaviour, attempting to
1320 // delete the same object twice.
1321 /// const_pointer_cast
1322 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1323 inline __shared_ptr<_Tp, _Lp>
1324 const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1325 { return __shared_ptr<_Tp, _Lp>(__r, const_cast<_Tp*>(__r.get())); }
1326
1327 // The seemingly equivalent code:
1328 // shared_ptr<_Tp, _Lp>(dynamic_cast<_Tp*>(__r.get()))
1329 // will eventually result in undefined behaviour, attempting to
1330 // delete the same object twice.
1331 /// dynamic_pointer_cast
1332 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1333 inline __shared_ptr<_Tp, _Lp>
1334 dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1335 {
1336 if (_Tp* __p = dynamic_cast<_Tp*>(__r.get()))
1337 return __shared_ptr<_Tp, _Lp>(__r, __p);
1338 return __shared_ptr<_Tp, _Lp>();
1339 }
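
The comments preceding each cast above warn that re-wrapping the raw result of a cast creates a second control block and hence a double delete; the aliasing constructor __shared_ptr(const __shared_ptr&, _Tp*) avoids this by sharing one control block. A sketch via the public std::shared_ptr API:

    // Sketch only: why the casts above use the aliasing constructor.
    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    int main() {
      std::shared_ptr<Base> b = std::make_shared<Derived>();

      // OK: shares b's control block; the Derived is destroyed exactly once.
      std::shared_ptr<Derived> d = std::static_pointer_cast<Derived>(b);

      // The "seemingly equivalent code" -- undefined behavior (double delete):
      // std::shared_ptr<Derived> bad(static_cast<Derived*>(b.get()));
      return 0;
    }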
1340
1341
1342 template<typename _Tp, _Lock_policy _Lp>
1343 class __weak_ptr
1344 {
1345 template<typename _Ptr>
1346 using _Convertible
1347 = typename enable_if<is_convertible<_Ptr, _Tp*>::value>::type;
1348
1349 public:
1350 typedef _Tp element_type;
1351
1352 constexpr __weak_ptr() noexcept
1353 : _M_ptr(nullptr), _M_refcount()
1354 { }
1355
1356 __weak_ptr(const __weak_ptr&) noexcept = default;
1357
1358 ~__weak_ptr() = default;
1359
1360 // The "obvious" converting constructor implementation:
1361 //
1362 // template<typename _Tp1>
1363 // __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
1364 // : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
1365 // { }
1366 //
1367 // has a serious problem.
1368 //
1369 // __r._M_ptr may already have been invalidated. The _M_ptr(__r._M_ptr)
1370 // conversion may require access to *__r._M_ptr (virtual inheritance).
1371 //
1372 // It is not possible to avoid spurious access violations since
1373 // in multithreaded programs __r._M_ptr may be invalidated at any point.
1374 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1375 __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r) noexcept
1376 : _M_refcount(__r._M_refcount)
1377 { _M_ptr = __r.lock().get(); }
1378
1379 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1380 __weak_ptr(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1381 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1382 { }
1383
1384 __weak_ptr(__weak_ptr&& __r) noexcept
1385 : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
1386 { __r._M_ptr = nullptr; }
1387
1388 template<typename _Tp1, typename = _Convertible<_Tp1*>>
1389 __weak_ptr(__weak_ptr<_Tp1, _Lp>&& __r) noexcept
1390 : _M_ptr(__r.lock().get()), _M_refcount(std::move(__r._M_refcount))
1391 { __r._M_ptr = nullptr; }
1392
1393 __weak_ptr&
1394 operator=(const __weak_ptr& __r) noexcept = default;
1395
1396 template<typename _Tp1>
1397 __weak_ptr&
1398 operator=(const __weak_ptr<_Tp1, _Lp>& __r) noexcept
1399 {
1400 _M_ptr = __r.lock().get();
1401 _M_refcount = __r._M_refcount;
1402 return *this;
1403 }
1404
1405 template<typename _Tp1>
1406 __weak_ptr&
1407 operator=(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1408 {
1409 _M_ptr = __r._M_ptr;
1410 _M_refcount = __r._M_refcount;
1411 return *this;
1412 }
1413
1414 __weak_ptr&
1415 operator=(__weak_ptr&& __r) noexcept
1416 {
1417 _M_ptr = __r._M_ptr;
1418 _M_refcount = std::move(__r._M_refcount);
1419 __r._M_ptr = nullptr;
1420 return *this;
1421 }
1422
1423 template<typename _Tp1>
1424 __weak_ptr&
1425 operator=(__weak_ptr<_Tp1, _Lp>&& __r) noexcept
1426 {
1427 _M_ptr = __r.lock().get();
1428 _M_refcount = std::move(__r._M_refcount);
1429 __r._M_ptr = nullptr;
1430 return *this;
1431 }
1432
1433 __shared_ptr<_Tp, _Lp>
1434 lock() const noexcept
1435 { return __shared_ptr<element_type, _Lp>(*this, std::nothrow); }
1436
1437 long
1438 use_count() const noexcept
1439 { return _M_refcount._M_get_use_count(); }
1440
1441 bool
1442 expired() const noexcept
1443 { return _M_refcount._M_get_use_count() == 0; }
1444
1445 template<typename _Tp1>
1446 bool
1447 owner_before(const __shared_ptr<_Tp1, _Lp>& __rhs) const
1448 { return _M_refcount._M_less(__rhs._M_refcount); }
1449
1450 template<typename _Tp1>
1451 bool
1452 owner_before(const __weak_ptr<_Tp1, _Lp>& __rhs) const
1453 { return _M_refcount._M_less(__rhs._M_refcount); }
1454
1455 void
1456 reset() noexcept
1457 { __weak_ptr().swap(*this); }
1458
1459 void
1460 swap(__weak_ptr& __s) noexcept
1461 {
1462 std::swap(_M_ptr, __s._M_ptr);
1463 _M_refcount._M_swap(__s._M_refcount);
1464 }
1465
1466 private:
1467 // Used by __enable_shared_from_this.
1468 void
1469 _M_assign(_Tp* __ptr, const __shared_count<_Lp>& __refcount) noexcept
1470 {
1471 if (use_count() == 0)
1472 {
1473 _M_ptr = __ptr;
1474 _M_refcount = __refcount;
1475 }
1476 }
1477
1478 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1479 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1480 friend class __enable_shared_from_this<_Tp, _Lp>;
1481 friend class enable_shared_from_this<_Tp>;
1482
1483 _Tp* _M_ptr; // Contained pointer.
1484 __weak_count<_Lp> _M_refcount; // Reference counter.
1485 };
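
As the long comment inside the class explains, the converting constructor reads the pointer through __r.lock().get() because _M_ptr may already dangle. The observable contract, sketched with the public std::weak_ptr API:

    // Sketch only: lock() yields an empty shared_ptr once the object is gone.
    #include <cassert>
    #include <memory>

    int main() {
      std::weak_ptr<int> w;
      {
        auto s = std::make_shared<int>(7);
        w = s;
        assert(!w.expired() && *w.lock() == 7);  // object still alive
      }
      assert(w.expired());          // the last shared_ptr released the object
      assert(w.lock() == nullptr);  // lock() returns an empty shared_ptr
      return 0;
    }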
1486
1487 // 20.7.2.3.6 weak_ptr specialized algorithms.
1488 template<typename _Tp, _Lock_policy _Lp>
1489 inline void
1490 swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b) noexcept
1491 { __a.swap(__b); }
1492
1493 template<typename _Tp, typename _Tp1>
1494 struct _Sp_owner_less : public binary_function<_Tp, _Tp, bool>
1495 {
1496 bool
1497 operator()(const _Tp& __lhs, const _Tp& __rhs) const
1498 { return __lhs.owner_before(__rhs); }
1499
1500 bool
1501 operator()(const _Tp& __lhs, const _Tp1& __rhs) const
1502 { return __lhs.owner_before(__rhs); }
1503
1504 bool
1505 operator()(const _Tp1& __lhs, const _Tp& __rhs) const
1506 { return __lhs.owner_before(__rhs); }
1507 };
1508
1509 template<typename _Tp, _Lock_policy _Lp>
1510 struct owner_less<__shared_ptr<_Tp, _Lp>>
1511 : public _Sp_owner_less<__shared_ptr<_Tp, _Lp>, __weak_ptr<_Tp, _Lp>>
1512 { };
1513
1514 template<typename _Tp, _Lock_policy _Lp>
1515 struct owner_less<__weak_ptr<_Tp, _Lp>>
1516 : public _Sp_owner_less<__weak_ptr<_Tp, _Lp>, __shared_ptr<_Tp, _Lp>>
1517 { };
1518
1519
1520 template<typename _Tp, _Lock_policy _Lp>
1521 class __enable_shared_from_this
1522 {
1523 protected:
1524 constexpr __enable_shared_from_this() noexcept { }
1525
1526 __enable_shared_from_this(const __enable_shared_from_this&) noexcept { }
1527
1528 __enable_shared_from_this&
1529 operator=(const __enable_shared_from_this&) noexcept
1530 { return *this; }
1531
1532 ~__enable_shared_from_this() { }
1533
1534 public:
1535 __shared_ptr<_Tp, _Lp>
1536 shared_from_this()
1537 { return __shared_ptr<_Tp, _Lp>(this->_M_weak_this); }
1538
1539 __shared_ptr<const _Tp, _Lp>
1540 shared_from_this() const
1541 { return __shared_ptr<const _Tp, _Lp>(this->_M_weak_this); }
1542
1543 private:
1544 template<typename _Tp1>
1545 void
1546 _M_weak_assign(_Tp1* __p, const __shared_count<_Lp>& __n) const noexcept
1547 { _M_weak_this._M_assign(__p, __n); }
1548
1549 template<_Lock_policy _Lp1, typename _Tp1, typename _Tp2>
1550 friend void
1551 __enable_shared_from_this_helper(const __shared_count<_Lp1>&,
1552 const __enable_shared_from_this<_Tp1,
1553 _Lp1>*, const _Tp2*) noexcept;
1554
1555 mutable __weak_ptr<_Tp, _Lp> _M_weak_this;
1556 };
1557
1558 template<_Lock_policy _Lp1, typename _Tp1, typename _Tp2>
1559 inline void
1560 __enable_shared_from_this_helper(const __shared_count<_Lp1>& __pn,
1561 const __enable_shared_from_this<_Tp1,
1562 _Lp1>* __pe,
1563 const _Tp2* __px) noexcept
1564 {
1565 if (__pe != nullptr)
1566 __pe->_M_weak_assign(const_cast<_Tp2*>(__px), __pn);
1567 }
1568
1569 template<typename _Tp, _Lock_policy _Lp, typename _Alloc, typename... _Args>
1570 inline __shared_ptr<_Tp, _Lp>
1571 __allocate_shared(const _Alloc& __a, _Args&&... __args)
1572 {
1573 return __shared_ptr<_Tp, _Lp>(_Sp_make_shared_tag(), __a,
1574 std::forward<_Args>(__args)...);
1575 }
1576
1577 template<typename _Tp, _Lock_policy _Lp, typename... _Args>
1578 inline __shared_ptr<_Tp, _Lp>
1579 __make_shared(_Args&&... __args)
1580 {
1581 typedef typename std::remove_const<_Tp>::type _Tp_nc;
1582 return std::__allocate_shared<_Tp, _Lp>(std::allocator<_Tp_nc>(),
1583 std::forward<_Args>(__args)...);
1584 }
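
__allocate_shared and __make_shared above are the entry points of the path this report traces (step 17 onward): one buffer is allocated for the control block and the object together, and the object is then constructed in place. Reproduced with the public API:

    // Sketch only: std::allocate_shared follows the traced path
    // (__allocate_shared -> __shared_count -> _Sp_counted_ptr_inplace
    //  -> allocator_traits::construct -> placement new).
    #include <memory>

    int main() {
      std::allocator<int> a;
      std::shared_ptr<int> p = std::allocate_shared<int>(a, 42);
      return *p == 42 ? 0 : 1;  // control block and int share one allocation
    }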
1585
1586 /// std::hash specialization for __shared_ptr.
1587 template<typename _Tp, _Lock_policy _Lp>
1588 struct hash<__shared_ptr<_Tp, _Lp>>
1589 : public __hash_base<size_t, __shared_ptr<_Tp, _Lp>>
1590 {
1591 size_t
1592 operator()(const __shared_ptr<_Tp, _Lp>& __s) const noexcept
1593 { return std::hash<_Tp*>()(__s.get()); }
1594 };
1595
1596_GLIBCXX_END_NAMESPACE_VERSION
1597} // namespace
1598
1599#endif // _SHARED_PTR_BASE_H

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/bits/alloc_traits.h

1// Allocator traits -*- C++ -*-
2
3// Copyright (C) 2011-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/alloc_traits.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _ALLOC_TRAITS_H
31#define _ALLOC_TRAITS_H 1
32
33#if __cplusplus >= 201103L
34
35#include <bits/memoryfwd.h>
36#include <bits/ptr_traits.h>
37#include <ext/numeric_traits.h>
38
39#define __cpp_lib_allocator_traits_is_always_equal 201411
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44
45 struct __allocator_traits_base
46 {
47 template<typename _Tp, typename _Up, typename = void>
48 struct __rebind : __replace_first_arg<_Tp, _Up> { };
49
50 template<typename _Tp, typename _Up>
51 struct __rebind<_Tp, _Up,
52 __void_t<typename _Tp::template rebind<_Up>::other>>
53 { using type = typename _Tp::template rebind<_Up>::other; };
54
55 protected:
56 template<typename _Tp>
57 using __pointer = typename _Tp::pointer;
58 template<typename _Tp>
59 using __c_pointer = typename _Tp::const_pointer;
60 template<typename _Tp>
61 using __v_pointer = typename _Tp::void_pointer;
62 template<typename _Tp>
63 using __cv_pointer = typename _Tp::const_void_pointer;
64 template<typename _Tp>
65 using __pocca = typename _Tp::propagate_on_container_copy_assignment;
66 template<typename _Tp>
67 using __pocma = typename _Tp::propagate_on_container_move_assignment;
68 template<typename _Tp>
69 using __pocs = typename _Tp::propagate_on_container_swap;
70 template<typename _Tp>
71 using __equal = typename _Tp::is_always_equal;
72 };
73
74 template<typename _Alloc, typename _Up>
75 using __alloc_rebind
76 = typename __allocator_traits_base::template __rebind<_Alloc, _Up>::type;
77
78 /**
79 * @brief Uniform interface to all allocator types.
80 * @ingroup allocators
81 */
82 template<typename _Alloc>
83 struct allocator_traits : __allocator_traits_base
84 {
85 /// The allocator type
86 typedef _Alloc allocator_type;
87 /// The allocated type
88 typedef typename _Alloc::value_type value_type;
89
90 /**
91 * @brief The allocator's pointer type.
92 *
93 * @c Alloc::pointer if that type exists, otherwise @c value_type*
94 */
95 using pointer = __detected_or_t<value_type*, __pointer, _Alloc>;
96
97 private:
98 // Select _Func<_Alloc> or pointer_traits<pointer>::rebind<_Tp>
99 template<template<typename> class _Func, typename _Tp, typename = void>
100 struct _Ptr
101 {
102 using type = typename pointer_traits<pointer>::template rebind<_Tp>;
103 };
104
105 template<template<typename> class _Func, typename _Tp>
106 struct _Ptr<_Func, _Tp, __void_t<_Func<_Alloc>>>
107 {
108 using type = _Func<_Alloc>;
109 };
110
111 // Select _A2::difference_type or pointer_traits<_Ptr>::difference_type
112 template<typename _A2, typename _PtrT, typename = void>
113 struct _Diff
114 { using type = typename pointer_traits<_PtrT>::difference_type; };
115
116 template<typename _A2, typename _PtrT>
117 struct _Diff<_A2, _PtrT, __void_t<typename _A2::difference_type>>
118 { using type = typename _A2::difference_type; };
119
120 // Select _A2::size_type or make_unsigned<_DiffT>::type
121 template<typename _A2, typename _DiffT, typename = void>
122 struct _Size : make_unsigned<_DiffT> { };
123
124 template<typename _A2, typename _DiffT>
125 struct _Size<_A2, _DiffT, __void_t<typename _A2::size_type>>
126 { using type = typename _A2::size_type; };
127
128 public:
129 /**
130 * @brief The allocator's const pointer type.
131 *
132 * @c Alloc::const_pointer if that type exists, otherwise
133 * <tt> pointer_traits<pointer>::rebind<const value_type> </tt>
134 */
135 using const_pointer = typename _Ptr<__c_pointer, const value_type>::type;
136
137 /**
138 * @brief The allocator's void pointer type.
139 *
140 * @c Alloc::void_pointer if that type exists, otherwise
141 * <tt> pointer_traits<pointer>::rebind<void> </tt>
142 */
143 using void_pointer = typename _Ptr<__v_pointer, void>::type;
144
145 /**
146 * @brief The allocator's const void pointer type.
147 *
148 * @c Alloc::const_void_pointer if that type exists, otherwise
149 * <tt> pointer_traits<pointer>::rebind<const void> </tt>
150 */
151 using const_void_pointer = typename _Ptr<__cv_pointer, const void>::type;
152
153 /**
154 * @brief The allocator's difference type
155 *
156 * @c Alloc::difference_type if that type exists, otherwise
157 * <tt> pointer_traits<pointer>::difference_type </tt>
158 */
159 using difference_type = typename _Diff<_Alloc, pointer>::type;
160
161 /**
162 * @brief The allocator's size type
163 *
164 * @c Alloc::size_type if that type exists, otherwise
165 * <tt> make_unsigned<difference_type>::type </tt>
166 */
167 using size_type = typename _Size<_Alloc, difference_type>::type;
168
169 /**
170 * @brief How the allocator is propagated on copy assignment
171 *
172 * @c Alloc::propagate_on_container_copy_assignment if that type exists,
173 * otherwise @c false_type
174 */
175 using propagate_on_container_copy_assignment
176 = __detected_or_t<false_type, __pocca, _Alloc>;
177
178 /**
179 * @brief How the allocator is propagated on move assignment
180 *
181 * @c Alloc::propagate_on_container_move_assignment if that type exists,
182 * otherwise @c false_type
183 */
184 using propagate_on_container_move_assignment
185 = __detected_or_t<false_type, __pocma, _Alloc>;
186
187 /**
188 * @brief How the allocator is propagated on swap
189 *
190 * @c Alloc::propagate_on_container_swap if that type exists,
191 * otherwise @c false_type
192 */
193 using propagate_on_container_swap
194 = __detected_or_t<false_type, __pocs, _Alloc>;
195
196 /**
197 * @brief Whether all instances of the allocator type compare equal.
198 *
199 * @c Alloc::is_always_equal if that type exists,
200 * otherwise @c is_empty<Alloc>::type
201 */
202 using is_always_equal
203 = __detected_or_t<typename is_empty<_Alloc>::type, __equal, _Alloc>;
204
205 template<typename _Tp>
206 using rebind_alloc = __alloc_rebind<_Alloc, _Tp>;
207 template<typename _Tp>
208 using rebind_traits = allocator_traits<rebind_alloc<_Tp>>;
209
210 private:
211 template<typename _Alloc2>
212 static auto
213 _S_allocate(_Alloc2& __a, size_type __n, const_void_pointer __hint, int)
214 -> decltype(__a.allocate(__n, __hint))
215 { return __a.allocate(__n, __hint); }
216
217 template<typename _Alloc2>
218 static pointer
219 _S_allocate(_Alloc2& __a, size_type __n, const_void_pointer, ...)
220 { return __a.allocate(__n); }
221
222 template<typename _Tp, typename... _Args>
223 struct __construct_helper
224 {
225 template<typename _Alloc2,
226 typename = decltype(std::declval<_Alloc2*>()->construct(
227 std::declval<_Tp*>(), std::declval<_Args>()...))>
228 static true_type __test(int);
229
230 template<typename>
231 static false_type __test(...);
232
233 using type = decltype(__test<_Alloc>(0));
234 };
235
236 template<typename _Tp, typename... _Args>
237 using __has_construct
238 = typename __construct_helper<_Tp, _Args...>::type;
239
240 template<typename _Tp, typename... _Args>
241 static _Require<__has_construct<_Tp, _Args...>>
242 _S_construct(_Alloc& __a, _Tp* __p, _Args&&... __args)
243 { __a.construct(__p, std::forward<_Args>(__args)...); }
244
245 template<typename _Tp, typename... _Args>
246 static
247 _Require<__and_<__not_<__has_construct<_Tp, _Args...>>,
248 is_constructible<_Tp, _Args...>>>
249 _S_construct(_Alloc&, _Tp* __p, _Args&&... __args)
250 { ::new((void*)__p) _Tp(std::forward<_Args>(__args)...); }
251
252 template<typename _Alloc2, typename _Tp>
253 static auto
254 _S_destroy(_Alloc2& __a, _Tp* __p, int)
255 -> decltype(__a.destroy(__p))
256 { __a.destroy(__p); }
257
258 template<typename _Alloc2, typename _Tp>
259 static void
260 _S_destroy(_Alloc2&, _Tp* __p, ...)
261 { __p->~_Tp(); }
262
263 template<typename _Alloc2>
264 static auto
265 _S_max_size(_Alloc2& __a, int)
266 -> decltype(__a.max_size())
267 { return __a.max_size(); }
268
269 template<typename _Alloc2>
270 static size_type
271 _S_max_size(_Alloc2&, ...)
272 {
273 // _GLIBCXX_RESOLVE_LIB_DEFECTS
274 // 2466. allocator_traits::max_size() default behavior is incorrect
275 return __gnu_cxx::__numeric_traits<size_type>::__max
276 / sizeof(value_type);
277 }
278
279 template<typename _Alloc2>
280 static auto
281 _S_select(_Alloc2& __a, int)
282 -> decltype(__a.select_on_container_copy_construction())
283 { return __a.select_on_container_copy_construction(); }
284
285 template<typename _Alloc2>
286 static _Alloc2
287 _S_select(_Alloc2& __a, ...)
288 { return __a; }
289
290 public:
291
292 /**
293 * @brief Allocate memory.
294 * @param __a An allocator.
295 * @param __n The number of objects to allocate space for.
296 *
297 * Calls @c a.allocate(n)
298 */
299 static pointer
300 allocate(_Alloc& __a, size_type __n)
301 { return __a.allocate(__n); }
302
303 /**
304 * @brief Allocate memory.
305 * @param __a An allocator.
306 * @param __n The number of objects to allocate space for.
307 * @param __hint Aid to locality.
308 * @return Memory of suitable size and alignment for @a n objects
309 * of type @c value_type
310 *
311 * Returns <tt> a.allocate(n, hint) </tt> if that expression is
312 * well-formed, otherwise returns @c a.allocate(n)
313 */
314 static pointer
315 allocate(_Alloc& __a, size_type __n, const_void_pointer __hint)
316 { return _S_allocate(__a, __n, __hint, 0); }
317
318 /**
319 * @brief Deallocate memory.
320 * @param __a An allocator.
321 * @param __p Pointer to the memory to deallocate.
322 * @param __n The number of objects space was allocated for.
323 *
324 * Calls <tt> a.deallocate(p, n) </tt>
325 */
326 static void
327 deallocate(_Alloc& __a, pointer __p, size_type __n)
328 { __a.deallocate(__p, __n); }
329
330 /**
331 * @brief Construct an object of type @a _Tp
332 * @param __a An allocator.
333 * @param __p Pointer to memory of suitable size and alignment for Tp
334 * @param __args Constructor arguments.
335 *
336 * Calls <tt> __a.construct(__p, std::forward<Args>(__args)...) </tt>
337 * if that expression is well-formed, otherwise uses placement-new
338 * to construct an object of type @a _Tp at location @a __p from the
339 * arguments @a __args...
340 */
341 template<typename _Tp, typename... _Args>
342 static auto construct(_Alloc& __a, _Tp* __p, _Args&&... __args)
343 -> decltype(_S_construct(__a, __p, std::forward<_Args>(__args)...))
344 { _S_construct(__a, __p, std::forward<_Args>(__args)...); }
345
346 /**
347 * @brief Destroy an object of type @a _Tp
348 * @param __a An allocator.
349 * @param __p Pointer to the object to destroy
350 *
351 * Calls @c __a.destroy(__p) if that expression is well-formed,
352 * otherwise calls @c __p->~_Tp()
353 */
354 template<typename _Tp>
355 static void destroy(_Alloc& __a, _Tp* __p)
356 { _S_destroy(__a, __p, 0); }
357
358 /**
359 * @brief The maximum supported allocation size
360 * @param __a An allocator.
361 * @return @c __a.max_size() or @c numeric_limits<size_type>::max()
362 *
363 * Returns @c __a.max_size() if that expression is well-formed,
364 * otherwise returns @c numeric_limits<size_type>::max()
365 */
366 static size_type max_size(const _Alloc& __a) noexcept
367 { return _S_max_size(__a, 0); }
368
369 /**
370 * @brief Obtain an allocator to use when copying a container.
371 * @param __rhs An allocator.
372 * @return @c __rhs.select_on_container_copy_construction() or @a __rhs
373 *
374 * Returns @c __rhs.select_on_container_copy_construction() if that
375 * expression is well-formed, otherwise returns @a __rhs
376 */
377 static _Alloc
378 select_on_container_copy_construction(const _Alloc& __rhs)
379 { return _S_select(__rhs, 0); }
380 };
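
construct() and destroy() above dispatch to the allocator's own members when they exist (the _S_construct/_S_destroy overload selection) and otherwise fall back to placement new and a direct destructor call. A sketch with a hypothetical MinimalAlloc that provides only allocate/deallocate, so both fallbacks are taken:

    // Sketch only: MinimalAlloc is a made-up allocator with no construct()
    // or destroy(), so allocator_traits supplies the defaults shown above.
    #include <cstdlib>
    #include <memory>
    #include <string>

    template <typename T>
    struct MinimalAlloc {
      using value_type = T;
      MinimalAlloc() = default;
      template <typename U> MinimalAlloc(const MinimalAlloc<U>&) {}
      T* allocate(std::size_t n) {
        return static_cast<T*>(std::malloc(n * sizeof(T)));
      }
      void deallocate(T* p, std::size_t) { std::free(p); }
    };

    int main() {
      MinimalAlloc<std::string> a;
      using Traits = std::allocator_traits<MinimalAlloc<std::string>>;
      std::string* p = Traits::allocate(a, 1);
      Traits::construct(a, p, "hello");  // fallback: ::new((void*)p) ...
      Traits::destroy(a, p);             // fallback: p->~basic_string()
      Traits::deallocate(a, p, 1);
      return 0;
    }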
381
382 /// Partial specialization for std::allocator.
383 template<typename _Tp>
384 struct allocator_traits<allocator<_Tp>>
385 {
386 /// The allocator type
387 using allocator_type = allocator<_Tp>;
388 /// The allocated type
389 using value_type = _Tp;
390
391 /// The allocator's pointer type.
392 using pointer = _Tp*;
393
394 /// The allocator's const pointer type.
395 using const_pointer = const _Tp*;
396
397 /// The allocator's void pointer type.
398 using void_pointer = void*;
399
400 /// The allocator's const void pointer type.
401 using const_void_pointer = const void*;
402
403 /// The allocator's difference type
404 using difference_type = std::ptrdiff_t;
405
406 /// The allocator's size type
407 using size_type = std::size_t;
408
409 /// How the allocator is propagated on copy assignment
410 using propagate_on_container_copy_assignment = false_type;
411
412 /// How the allocator is propagated on move assignment
413 using propagate_on_container_move_assignment = true_type;
414
415 /// How the allocator is propagated on swap
416 using propagate_on_container_swap = false_type;
417
418 /// Whether all instances of the allocator type compare equal.
419 using is_always_equal = true_type;
420
421 template<typename _Up>
422 using rebind_alloc = allocator<_Up>;
423
424 template<typename _Up>
425 using rebind_traits = allocator_traits<allocator<_Up>>;
426
427 /**
428 * @brief Allocate memory.
429 * @param __a An allocator.
430 * @param __n The number of objects to allocate space for.
431 *
432 * Calls @c a.allocate(n)
433 */
434 static pointer
435 allocate(allocator_type& __a, size_type __n)
436 { return __a.allocate(__n); }
437
438 /**
439 * @brief Allocate memory.
440 * @param __a An allocator.
441 * @param __n The number of objects to allocate space for.
442 * @param __hint Aid to locality.
443 * @return Memory of suitable size and alignment for @a n objects
444 * of type @c value_type
445 *
446 * Returns <tt> a.allocate(n, hint) </tt>
447 */
448 static pointer
449 allocate(allocator_type& __a, size_type __n, const_void_pointer __hint)
450 { return __a.allocate(__n, __hint); }
451
452 /**
453 * @brief Deallocate memory.
454 * @param __a An allocator.
455 * @param __p Pointer to the memory to deallocate.
456 * @param __n The number of objects space was allocated for.
457 *
458 * Calls <tt> a.deallocate(p, n) </tt>
459 */
460 static void
461 deallocate(allocator_type& __a, pointer __p, size_type __n)
462 { __a.deallocate(__p, __n); }
463
464 /**
465 * @brief Construct an object of type @a _Up
466 * @param __a An allocator.
467 * @param __p Pointer to memory of suitable size and alignment for Tp
468 * @param __args Constructor arguments.
469 *
470 * Calls <tt> __a.construct(__p, std::forward<Args>(__args)...) </tt>
471 */
472 template<typename _Up, typename... _Args>
473 static void
474 construct(allocator_type& __a, _Up* __p, _Args&&... __args)
475 { __a.construct(__p, std::forward<_Args>(__args)...); }
20. Calling 'new_allocator::construct'
476
477 /**
478 * @brief Destroy an object of type @a _Up
479 * @param __a An allocator.
480 * @param __p Pointer to the object to destroy
481 *
482 * Calls @c __a.destroy(__p).
483 */
484 template<typename _Up>
485 static void
486 destroy(allocator_type& __a, _Up* __p)
487 { __a.destroy(__p); }
488
489 /**
490 * @brief The maximum supported allocation size
491 * @param __a An allocator.
492 * @return @c __a.max_size()
493 */
494 static size_type
495 max_size(const allocator_type& __a) noexcept
496 { return __a.max_size(); }
497
498 /**
499 * @brief Obtain an allocator to use when copying a container.
500 * @param __rhs An allocator.
501 * @return @c __rhs
502 */
503 static allocator_type
504 select_on_container_copy_construction(const allocator_type& __rhs)
505 { return __rhs; }
506 };
507
508
509 template<typename _Alloc>
510 inline void
511 __do_alloc_on_copy(_Alloc& __one, const _Alloc& __two, true_type)
512 { __one = __two; }
513
514 template<typename _Alloc>
515 inline void
516 __do_alloc_on_copy(_Alloc&, const _Alloc&, false_type)
517 { }
518
519 template<typename _Alloc>
520 inline void __alloc_on_copy(_Alloc& __one, const _Alloc& __two)
521 {
522 typedef allocator_traits<_Alloc> __traits;
523 typedef typename __traits::propagate_on_container_copy_assignment __pocca;
524 __do_alloc_on_copy(__one, __two, __pocca());
525 }
526
527 template<typename _Alloc>
528 inline _Alloc __alloc_on_copy(const _Alloc& __a)
529 {
530 typedef allocator_traits<_Alloc> __traits;
531 return __traits::select_on_container_copy_construction(__a);
532 }
533
534 template<typename _Alloc>
535 inline void __do_alloc_on_move(_Alloc& __one, _Alloc& __two, true_type)
536 { __one = std::move(__two); }
537
538 template<typename _Alloc>
539 inline void __do_alloc_on_move(_Alloc&, _Alloc&, false_type)
540 { }
541
542 template<typename _Alloc>
543 inline void __alloc_on_move(_Alloc& __one, _Alloc& __two)
544 {
545 typedef allocator_traits<_Alloc> __traits;
546 typedef typename __traits::propagate_on_container_move_assignment __pocma;
547 __do_alloc_on_move(__one, __two, __pocma());
548 }
549
550 template<typename _Alloc>
551 inline void __do_alloc_on_swap(_Alloc& __one, _Alloc& __two, true_type)
552 {
553 using std::swap;
554 swap(__one, __two);
555 }
556
557 template<typename _Alloc>
558 inline void __do_alloc_on_swap(_Alloc&, _Alloc&, false_type)
559 { }
560
561 template<typename _Alloc>
562 inline void __alloc_on_swap(_Alloc& __one, _Alloc& __two)
563 {
564 typedef allocator_traits<_Alloc> __traits;
565 typedef typename __traits::propagate_on_container_swap __pocs;
566 __do_alloc_on_swap(__one, __two, __pocs());
567 }
568
569 template<typename _Alloc>
570 class __is_copy_insertable_impl
571 {
572 typedef allocator_traits<_Alloc> _Traits;
573
574 template<typename _Up, typename
575 = decltype(_Traits::construct(std::declval<_Alloc&>(),
576 std::declval<_Up*>(),
577 std::declval<const _Up&>()))>
578 static true_type
579 _M_select(int);
580
581 template<typename _Up>
582 static false_type
583 _M_select(...);
584
585 public:
586 typedef decltype(_M_select<typename _Alloc::value_type>(0)) type;
587 };
588
589 // true if _Alloc::value_type is CopyInsertable into containers using _Alloc
590 template<typename _Alloc>
591 struct __is_copy_insertable
592 : __is_copy_insertable_impl<_Alloc>::type
593 { };
594
595 // std::allocator<_Tp> just requires CopyConstructible
596 template<typename _Tp>
597 struct __is_copy_insertable<allocator<_Tp>>
598 : is_copy_constructible<_Tp>
599 { };
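
__is_copy_insertable itself is internal, but its std::allocator specialization above reduces to is_copy_constructible, which can be checked directly:

    // Sketch only: the public reduction of __is_copy_insertable for
    // std::allocator is plain std::is_copy_constructible.
    #include <type_traits>

    struct MoveOnly {
      MoveOnly() = default;
      MoveOnly(MoveOnly&&) = default;
      MoveOnly(const MoveOnly&) = delete;
    };

    static_assert(std::is_copy_constructible<int>::value,
                  "int is copy-insertable via std::allocator");
    static_assert(!std::is_copy_constructible<MoveOnly>::value,
                  "a move-only type is not");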
600
601_GLIBCXX_END_NAMESPACE_VERSION
602} // namespace std
603
604#endif
605#endif

/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/ext/new_allocator.h

1// Allocator that wraps operator new -*- C++ -*-
2
3// Copyright (C) 2001-2016 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file ext/new_allocator.h
26 * This file is a GNU extension to the Standard C++ Library.
27 */
28
29#ifndef _NEW_ALLOCATOR_H
30#define _NEW_ALLOCATOR_H 1
31
32#include <bits/c++config.h>
33#include <new>
34#include <bits/functexcept.h>
35#include <bits/move.h>
36#if __cplusplus >= 201103L
37#include <type_traits>
38#endif
39
40namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
41{
42_GLIBCXX_BEGIN_NAMESPACE_VERSION
43
44 using std::size_t;
45 using std::ptrdiff_t;
46
47 /**
48 * @brief An allocator that uses global new, as per [20.4].
49 * @ingroup allocators
50 *
51 * This is precisely the allocator defined in the C++ Standard.
52 * - all allocation calls operator new
53 * - all deallocation calls operator delete
54 *
55 * @tparam _Tp Type of allocated object.
56 */
57 template<typename _Tp>
58 class new_allocator
59 {
60 public:
61 typedef size_t size_type;
62 typedef ptrdiff_t difference_type;
63 typedef _Tp* pointer;
64 typedef const _Tp* const_pointer;
65 typedef _Tp& reference;
66 typedef const _Tp& const_reference;
67 typedef _Tp value_type;
68
69 template<typename _Tp1>
70 struct rebind
71 { typedef new_allocator<_Tp1> other; };
72
73#if __cplusplus >= 201103L
74 // _GLIBCXX_RESOLVE_LIB_DEFECTS
75 // 2103. propagate_on_container_move_assignment
76 typedef std::true_type propagate_on_container_move_assignment;
77#endif
78
79      new_allocator() _GLIBCXX_USE_NOEXCEPT { }
80
81      new_allocator(const new_allocator&) _GLIBCXX_USE_NOEXCEPT { }
82
83      template<typename _Tp1>
84        new_allocator(const new_allocator<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }
85
86      ~new_allocator() _GLIBCXX_USE_NOEXCEPT { }
87
88 pointer
89      address(reference __x) const _GLIBCXX_NOEXCEPT
90      { return std::__addressof(__x); }
91
92      const_pointer
93      address(const_reference __x) const _GLIBCXX_NOEXCEPT
94 { return std::__addressof(__x); }
95
96 // NB: __n is permitted to be 0. The C++ standard says nothing
97 // about what the return value is when __n == 0.
98 pointer
99 allocate(size_type __n, const void* = 0)
100 {
101 if (__n > this->max_size())
102 std::__throw_bad_alloc();
103
104 return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp)));
105 }
106
107 // __p is not permitted to be a null pointer.
108 void
109 deallocate(pointer __p, size_type)
110 { ::operator delete(__p); }
111
112 size_type
113      max_size() const _GLIBCXX_USE_NOEXCEPT
114 { return size_t(-1) / sizeof(_Tp); }
115
116#if __cplusplus >= 201103L
117 template<typename _Up, typename... _Args>
118 void
119 construct(_Up* __p, _Args&&... __args)
120 { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
21. Calling constructor for 'PoolEntry'
121
122 template<typename _Up>
123 void
124 destroy(_Up* __p) { __p->~_Up(); }
125#else
126 // _GLIBCXX_RESOLVE_LIB_DEFECTS
127 // 402. wrong new expression in [some_] allocator::construct
128 void
129 construct(pointer __p, const _Tp& __val)
130 { ::new((void *)__p) _Tp(__val); }
131
132 void
133 destroy(pointer __p) { __p->~_Tp(); }
134#endif
135 };
136
137 template<typename _Tp>
138 inline bool
139 operator==(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
140 { return true; }
141
142 template<typename _Tp>
143 inline bool
144 operator!=(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
145 { return false; }
146
147_GLIBCXX_END_NAMESPACE_VERSION
148} // namespace
149
150#endif
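
new_allocator is the engine behind std::allocator, so the sequence the analyzer follows here (allocate raw memory, then construct via placement new at step 21) can be reproduced with the public interface:

    // Sketch only: the allocate/construct/destroy/deallocate round trip
    // that new_allocator implements, driven through std::allocator_traits.
    #include <memory>
    #include <vector>

    int main() {
      std::allocator<std::vector<int>> a;
      using Traits = std::allocator_traits<decltype(a)>;
      std::vector<int>* p = Traits::allocate(a, 1);  // ::operator new only
      Traits::construct(a, p, 3, 1);                 // placement new: {1,1,1}
      Traits::destroy(a, p);                         // explicit destructor call
      Traits::deallocate(a, p, 1);                   // ::operator delete
      return 0;
    }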

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/PBQP/Math.h

1//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_CODEGEN_PBQP_MATH_H
10#define LLVM_CODEGEN_PBQP_MATH_H
11
12#include "llvm/ADT/Hashing.h"
13#include "llvm/ADT/STLExtras.h"
14#include <algorithm>
15#include <cassert>
16#include <functional>
17#include <memory>
18
19namespace llvm {
20namespace PBQP {
21
22using PBQPNum = float;
23
24/// PBQP Vector class.
25class Vector {
26 friend hash_code hash_value(const Vector &);
27
28public:
29 /// Construct a PBQP vector of the given size.
30 explicit Vector(unsigned Length)
31 : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {}
32
33 /// Construct a PBQP vector with initializer.
34 Vector(unsigned Length, PBQPNum InitVal)
35 : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {
36 std::fill(Data.get(), Data.get() + Length, InitVal);
37 }
38
39 /// Copy construct a PBQP vector.
40 Vector(const Vector &V)
41 : Length(V.Length), Data(std::make_unique<PBQPNum []>(Length)) {
42 std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
43 }
44
45 /// Move construct a PBQP vector.
46 Vector(Vector &&V)
47 : Length(V.Length), Data(std::move(V.Data)) {
48 V.Length = 0;
49 }
50
51 /// Comparison operator.
52 bool operator==(const Vector &V) const {
53    assert(Length != 0 && Data && "Invalid vector");
54 if (Length != V.Length)
55 return false;
56 return std::equal(Data.get(), Data.get() + Length, V.Data.get());
57 }
58
59 /// Return the length of the vector
60 unsigned getLength() const {
61    assert(Length != 0 && Data && "Invalid vector");
62 return Length;
63 }
64
65 /// Element access.
66 PBQPNum& operator[](unsigned Index) {
67    assert(Length != 0 && Data && "Invalid vector");
68    assert(Index < Length && "Vector element access out of bounds.");
69 return Data[Index];
70 }
71
72 /// Const element access.
73 const PBQPNum& operator[](unsigned Index) const {
74 assert(Length != 0 && Data && "Invalid vector");
75 assert(Index < Length && "Vector element access out of bounds.");
76 return Data[Index];
77 }
78
79 /// Add another vector to this one.
80 Vector& operator+=(const Vector &V) {
81 assert(Length != 0 && Data && "Invalid vector");
82 assert(Length == V.Length && "Vector length mismatch.");
83 std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
84 std::plus<PBQPNum>());
85 return *this;
86 }
87
88 /// Returns the index of the minimum value in this vector
89 unsigned minIndex() const {
90 assert(Length != 0 && Data && "Invalid vector");
91 return std::min_element(Data.get(), Data.get() + Length) - Data.get();
92 }
93
94private:
95 unsigned Length;
96 std::unique_ptr<PBQPNum []> Data;
97};
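
A brief usage sketch of the Vector class above (illustrative only, assuming the LLVM headers are on the include path; vectorSketch is a hypothetical helper, not part of the report):

#include "llvm/CodeGen/PBQP/Math.h"
#include <cassert>
#include <utility>

void vectorSketch() {
  llvm::PBQP::Vector A(3, 1.0);        // [1, 1, 1]
  llvm::PBQP::Vector B(3, 0.0);        // [0, 0, 0]
  B[1] = -2.0;                         // [0, -2, 0]
  A += B;                              // [1, -1, 1]
  assert(A.minIndex() == 1);           // smallest element sits at index 1
  llvm::PBQP::Vector C(std::move(A));  // A's Length becomes 0, so any
  (void)C;                             // further use of A trips the asserts
}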
98
99/// Return a hash_value for the given vector.
100inline hash_code hash_value(const Vector &V) {
101 unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
102 unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
103 return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
104}
105
106/// Output a textual representation of the given vector on the given
107/// output stream.
108template <typename OStream>
109OStream& operator<<(OStream &OS, const Vector &V) {
110 assert((V.getLength() != 0) && "Zero-length vector badness.");
111
112 OS << "[ " << V[0];
113 for (unsigned i = 1; i < V.getLength(); ++i)
114 OS << ", " << V[i];
115 OS << " ]";
116
117 return OS;
118}
119
120/// PBQP Matrix class
121class Matrix {
122private:
123 friend hash_code hash_value(const Matrix &);
124
125public:
126 /// Construct a PBQP Matrix with the given dimensions.
127 Matrix(unsigned Rows, unsigned Cols) :
128 Rows(Rows), Cols(Cols), Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
129 }
130
131 /// Construct a PBQP Matrix with the given dimensions and initial
132 /// value.
133 Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
134 : Rows(Rows), Cols(Cols),
135 Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
136 std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
137 }
138
139 /// Copy construct a PBQP matrix.
140 Matrix(const Matrix &M)
141 : Rows(M.Rows), Cols(M.Cols),
142 Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
143 std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
144 }
145
146 /// Move construct a PBQP matrix.
147 Matrix(Matrix &&M)
148 : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
149 M.Rows = M.Cols = 0;
150 }
151
152 /// Comparison operator.
153 bool operator==(const Matrix &M) const {
154 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
155 if (Rows != M.Rows || Cols != M.Cols)
156 return false;
157 return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
158 }
159
160 /// Return the number of rows in this matrix.
161 unsigned getRows() const {
162 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
163 return Rows;
164 }
165
166 /// Return the number of cols in this matrix.
167 unsigned getCols() const {
168 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
169 return Cols;
170 }
171
172 /// Matrix element access.
173 PBQPNum* operator[](unsigned R) {
174 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
175 assert(R < Rows && "Row out of bounds.");
176 return Data.get() + (R * Cols);
177 }
178
179 /// Matrix element access.
180 const PBQPNum* operator[](unsigned R) const {
181 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
182 assert(R < Rows && "Row out of bounds.");
183 return Data.get() + (R * Cols);
184 }
185
186 /// Returns the given row as a vector.
187 Vector getRowAsVector(unsigned R) const {
188 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
189 Vector V(Cols);
190 for (unsigned C = 0; C < Cols; ++C)
191 V[C] = (*this)[R][C];
192 return V;
193 }
194
195 /// Returns the given column as a vector.
196 Vector getColAsVector(unsigned C) const {
197 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
198 Vector V(Rows);
199 for (unsigned R = 0; R < Rows; ++R)
200 V[R] = (*this)[R][C];
201 return V;
202 }
203
204 /// Matrix transpose.
205 Matrix transpose() const {
206 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
207 Matrix M(Cols, Rows);
208 for (unsigned r = 0; r < Rows; ++r)
209 for (unsigned c = 0; c < Cols; ++c)
210 M[c][r] = (*this)[r][c];
211 return M;
212 }
213
214 /// Add the given matrix to this one.
215 Matrix& operator+=(const Matrix &M) {
216 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
217 assert(Rows == M.Rows && Cols == M.Cols &&
218 "Matrix dimensions mismatch.");
219 std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
220 Data.get(), std::plus<PBQPNum>());
221 return *this;
222 }
223
224 Matrix operator+(const Matrix &M) {
225 assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
226 Matrix Tmp(*this);
227 Tmp += M;
228 return Tmp;
229 }
230
231private:
232 unsigned Rows, Cols;
233 std::unique_ptr<PBQPNum []> Data;
234};
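
A matching sketch for the Matrix class (illustrative only; matrixSketch is a hypothetical helper, not part of the report):

#include "llvm/CodeGen/PBQP/Math.h"
#include <cassert>

void matrixSketch() {
  llvm::PBQP::Matrix M(2, 3, 0.0);      // 2 rows x 3 cols, zero-filled
  M[0][2] = 5.0;                        // operator[] yields a raw row pointer
  llvm::PBQP::Vector Row0 = M.getRowAsVector(0);
  assert(Row0[2] == 5.0);
  llvm::PBQP::Matrix T = M.transpose(); // 3 rows x 2 cols
  assert(T.getRows() == 3 && T.getCols() == 2 && T[2][0] == 5.0);
}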
235
236/// Return a hash_code for the given matrix.
237inline hash_code hash_value(const Matrix &M) {
238 unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
239 unsigned *MEnd =
240 reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
241 return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
242}
243
244/// Output a textual representation of the given matrix on the given
245/// output stream.
246template <typename OStream>
247OStream& operator<<(OStream &OS, const Matrix &M) {
248 assert((M.getRows() != 0) && "Zero-row matrix badness.");
249 for (unsigned i = 0; i < M.getRows(); ++i)
250 OS << M.getRowAsVector(i) << "\n";
251 return OS;
252}
253
254template <typename Metadata>
255class MDVector : public Vector {
256public:
257 MDVector(const Vector &v) : Vector(v), md(*this) {}
258 MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
259
260 const Metadata& getMetadata() const { return md; }
261
262private:
263 Metadata md;
264};
265
266template <typename Metadata>
267inline hash_code hash_value(const MDVector<Metadata> &V) {
268 return hash_value(static_cast<const Vector&>(V));
269}
270
271template <typename Metadata>
272class MDMatrix : public Matrix {
273public:
274 MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
275 MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
Step 23: Calling constructor for 'MatrixMetadata'
276
277 const Metadata& getMetadata() const { return md; }
278
279private:
280 Metadata md;
281};
282
283template <typename Metadata>
284inline hash_code hash_value(const MDMatrix<Metadata> &M) {
285 return hash_value(static_cast<const Matrix&>(M));
286}
287
288} // end namespace PBQP
289} // end namespace llvm
290
291#endif // LLVM_CODEGEN_PBQP_MATH_H

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/RegAllocPBQP.h

1//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the PBQPBuilder interface, for classes which build PBQP
10// instances to represent register allocation problems, and the RegAllocPBQP
11// interface.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
16#define LLVM_CODEGEN_REGALLOCPBQP_H
17
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/CodeGen/PBQP/CostAllocator.h"
21#include "llvm/CodeGen/PBQP/Graph.h"
22#include "llvm/CodeGen/PBQP/Math.h"
23#include "llvm/CodeGen/PBQP/ReductionRules.h"
24#include "llvm/CodeGen/PBQP/Solution.h"
25#include "llvm/CodeGen/Register.h"
26#include "llvm/MC/MCRegister.h"
27#include "llvm/Support/ErrorHandling.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <limits>
32#include <memory>
33#include <set>
34#include <vector>
35
36namespace llvm {
37
38class FunctionPass;
39class LiveIntervals;
40class MachineBlockFrequencyInfo;
41class MachineFunction;
42class raw_ostream;
43
44namespace PBQP {
45namespace RegAlloc {
46
47/// Spill option index.
48inline unsigned getSpillOptionIdx() { return 0; }
49
50/// Metadata to speed allocatability test.
51///
52/// Keeps track of the number of infinities in each row and column.
53class MatrixMetadata {
54public:
55 MatrixMetadata(const Matrix& M)
56 : UnsafeRows(new bool[M.getRows() - 1]()),
57 UnsafeCols(new bool[M.getCols() - 1]()) {
58 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
Step 24: Memory is allocated
59
60 for (unsigned i = 1; i < M.getRows(); ++i) {
Step 25: Loop condition is true. Entering loop body
Step 27: Loop condition is false. Execution continues on line 73
61 unsigned RowCount = 0;
62 for (unsigned j = 1; j < M.getCols(); ++j) {
Step 26: Loop condition is false. Execution continues on line 70
63 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
64 ++RowCount;
65 ++ColCounts[j - 1];
66 UnsafeRows[i - 1] = true;
67 UnsafeCols[j - 1] = true;
68 }
69 }
70 WorstRow = std::max(WorstRow, RowCount);
71 }
72 unsigned WorstColCountForCurRow =
73 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
Step 28: Use of zero-allocated memory
74 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
75 delete[] ColCounts;
76 }
77
78 MatrixMetadata(const MatrixMetadata &) = delete;
79 MatrixMetadata &operator=(const MatrixMetadata &) = delete;
80
81 unsigned getWorstRow() const { return WorstRow; }
82 unsigned getWorstCol() const { return WorstCol; }
83 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
84 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
85
86private:
87 unsigned WorstRow = 0;
88 unsigned WorstCol = 0;
89 std::unique_ptr<bool[]> UnsafeRows;
90 std::unique_ptr<bool[]> UnsafeCols;
91};
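
The constructor above is where the report's warning fires: for a one-column matrix, M.getCols() - 1 is zero, so ColCounts is a zero-sized allocation, and std::max_element(ColCounts, ColCounts) returns its end pointer, which line 73 then dereferences. A self-contained sketch of that failure mode, plus one possible guard (an illustration, not the upstream fix):

#include <algorithm>
#include <cstdio>

int main() {
  unsigned Cols = 1; // hypothetical one-column edge matrix
  // Zero-sized array: legal to allocate, illegal to dereference.
  unsigned *ColCounts = new unsigned[Cols - 1]();
  // The header effectively executes the next (commented-out) line;
  // max_element(first, first) returns first, so the '*' reads the
  // zero-allocated block, which is what the analyzer reports.
  // unsigned Worst = *std::max_element(ColCounts, ColCounts + (Cols - 1));
  unsigned Worst =
      Cols > 1 ? *std::max_element(ColCounts, ColCounts + (Cols - 1)) : 0;
  std::printf("worst column count: %u\n", Worst);
  delete[] ColCounts;
  return 0;
}

Whether a one-column matrix can actually reach this constructor depends on the callers: the handleAddNode assertion later in RegAllocPBQP.h requires node-cost vectors of length greater than one, which suggests edge matrices normally have at least two rows and columns. The path may therefore be a false positive in practice, though the unguarded max_element call remains fragile.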
92
93/// Holds a vector of the allowed physical regs for a vreg.
94class AllowedRegVector {
95 friend hash_code hash_value(const AllowedRegVector &);
96
97public:
98 AllowedRegVector() = default;
99 AllowedRegVector(AllowedRegVector &&) = default;
100
101 AllowedRegVector(const std::vector<MCRegister> &OptVec)
102 : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
103 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
104 }
105
106 unsigned size() const { return NumOpts; }
107 MCRegister operator[](size_t I) const { return Opts[I]; }
108
109 bool operator==(const AllowedRegVector &Other) const {
110 if (NumOpts != Other.NumOpts)
111 return false;
112 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
113 }
114
115 bool operator!=(const AllowedRegVector &Other) const {
116 return !(*this == Other);
117 }
118
119private:
120 unsigned NumOpts = 0;
121 std::unique_ptr<MCRegister[]> Opts;
122};
123
124inline hash_code hash_value(const AllowedRegVector &OptRegs) {
125 MCRegister *OStart = OptRegs.Opts.get();
126 MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
127 return hash_combine(OptRegs.NumOpts,
128 hash_combine_range(OStart, OEnd));
129}
130
131/// Holds graph-level metadata relevant to PBQP RA problems.
132class GraphMetadata {
133private:
134 using AllowedRegVecPool = ValuePool<AllowedRegVector>;
135
136public:
137 using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
138
139 GraphMetadata(MachineFunction &MF,
140 LiveIntervals &LIS,
141 MachineBlockFrequencyInfo &MBFI)
142 : MF(MF), LIS(LIS), MBFI(MBFI) {}
143
144 MachineFunction &MF;
145 LiveIntervals &LIS;
146 MachineBlockFrequencyInfo &MBFI;
147
148 void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
149 VRegToNodeId[VReg.id()] = NId;
150 }
151
152 GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
153 auto VRegItr = VRegToNodeId.find(VReg);
154 if (VRegItr == VRegToNodeId.end())
155 return GraphBase::invalidNodeId();
156 return VRegItr->second;
157 }
158
159 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
160 return AllowedRegVecs.getValue(std::move(Allowed));
161 }
162
163private:
164 DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
165 AllowedRegVecPool AllowedRegVecs;
166};
167
168/// Holds solver state and other metadata relevant to each PBQP RA node.
169class NodeMetadata {
170public:
171 using AllowedRegVector = RegAlloc::AllowedRegVector;
172
173 // The node's reduction state. The order in this enum is important,
174 // as it is assumed nodes can only progress up (i.e. towards being
175 // optimally reducible) when reducing the graph.
176 using ReductionState = enum {
177 Unprocessed,
178 NotProvablyAllocatable,
179 ConservativelyAllocatable,
180 OptimallyReducible
181 };
182
183 NodeMetadata() = default;
184
185 NodeMetadata(const NodeMetadata &Other)
186 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
187 OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
188 AllowedRegs(Other.AllowedRegs)
189#ifndef NDEBUG
190 , everConservativelyAllocatable(Other.everConservativelyAllocatable)
191#endif
192 {
193 if (NumOpts > 0) {
194 std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
195 &OptUnsafeEdges[0]);
196 }
197 }
198
199 NodeMetadata(NodeMetadata &&) = default;
200 NodeMetadata& operator=(NodeMetadata &&) = default;
201
202 void setVReg(Register VReg) { this->VReg = VReg; }
203 Register getVReg() const { return VReg; }
204
205 void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
206 this->AllowedRegs = std::move(AllowedRegs);
207 }
208 const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
209
210 void setup(const Vector& Costs) {
211 NumOpts = Costs.getLength() - 1;
212 OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
213 }
214
215 ReductionState getReductionState() const { return RS; }
216 void setReductionState(ReductionState RS) {
217 assert(RS >= this->RS && "A node's reduction state can not be downgraded");
218 this->RS = RS;
219
220#ifndef NDEBUG
221 // Remember this state to assert later that a non-infinite register
222 // option was available.
223 if (RS == ConservativelyAllocatable)
224 everConservativelyAllocatable = true;
225#endif
226 }
227
228 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
229 DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
230 const bool* UnsafeOpts =
231 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
232 for (unsigned i = 0; i < NumOpts; ++i)
233 OptUnsafeEdges[i] += UnsafeOpts[i];
234 }
235
236 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
237 DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
238 const bool* UnsafeOpts =
239 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
240 for (unsigned i = 0; i < NumOpts; ++i)
241 OptUnsafeEdges[i] -= UnsafeOpts[i];
242 }
243
244 bool isConservativelyAllocatable() const {
245 return (DeniedOpts < NumOpts) ||
246 (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
247 &OptUnsafeEdges[NumOpts]);
248 }
249
250#ifndef NDEBUG
251 bool wasConservativelyAllocatable() const {
252 return everConservativelyAllocatable;
253 }
254#endif
255
256private:
257 ReductionState RS = Unprocessed;
258 unsigned NumOpts = 0;
259 unsigned DeniedOpts = 0;
260 std::unique_ptr<unsigned[]> OptUnsafeEdges;
261 Register VReg;
262 GraphMetadata::AllowedRegVecRef AllowedRegs;
263
264#ifndef NDEBUG
265 bool everConservativelyAllocatable = false;
266#endif
267};
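
The allocatability test in isConservativelyAllocatable can be restated standalone (an illustrative re-statement of the logic above, not LLVM code):

#include <algorithm>

// A node is conservatively allocatable when the worst-case denials
// cannot cover every register option, or when some option has no
// unsafe edge at all.
bool conservativelyAllocatable(unsigned DeniedOpts, unsigned NumOpts,
                               const unsigned *OptUnsafeEdges) {
  return DeniedOpts < NumOpts ||
         std::find(OptUnsafeEdges, OptUnsafeEdges + NumOpts, 0u) !=
             OptUnsafeEdges + NumOpts;
}
// Example: DeniedOpts = 2, NumOpts = 2, OptUnsafeEdges = {1, 0}.
// The first clause fails (2 < 2), but option 1 has zero unsafe edges,
// so the node is still conservatively allocatable.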
268
269class RegAllocSolverImpl {
270private:
271 using RAMatrix = MDMatrix<MatrixMetadata>;
272
273public:
274 using RawVector = PBQP::Vector;
275 using RawMatrix = PBQP::Matrix;
276 using Vector = PBQP::Vector;
277 using Matrix = RAMatrix;
278 using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;
279
280 using NodeId = GraphBase::NodeId;
281 using EdgeId = GraphBase::EdgeId;
282
283 using NodeMetadata = RegAlloc::NodeMetadata;
284 struct EdgeMetadata {};
285 using GraphMetadata = RegAlloc::GraphMetadata;
286
287 using Graph = PBQP::Graph<RegAllocSolverImpl>;
288
289 RegAllocSolverImpl(Graph &G) : G(G) {}
290
291 Solution solve() {
292 G.setSolver(*this);
293 Solution S;
294 setup();
295 S = backpropagate(G, reduce());
296 G.unsetSolver();
297 return S;
298 }
299
300 void handleAddNode(NodeId NId) {
301 assert(G.getNodeCosts(NId).getLength() > 1 &&
302 "PBQP Graph should not contain single or zero-option nodes");
303 G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
304 }
305
306 void handleRemoveNode(NodeId NId) {}
307 void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
308
309 void handleAddEdge(EdgeId EId) {
310 handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
311 handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
312 }
313
314 void handleDisconnectEdge(EdgeId EId, NodeId NId) {
315 NodeMetadata& NMd = G.getNodeMetadata(NId);
316 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
317 NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
318 promote(NId, NMd);
319 }
320
321 void handleReconnectEdge(EdgeId EId, NodeId NId) {
322 NodeMetadata& NMd = G.getNodeMetadata(NId);
323 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
324 NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
325 }
326
327 void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
328 NodeId N1Id = G.getEdgeNode1Id(EId);
329 NodeId N2Id = G.getEdgeNode2Id(EId);
330 NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
331 NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
332 bool Transpose = N1Id != G.getEdgeNode1Id(EId);
333
334 // Metadata are computed incrementally. First, update them
335 // by removing the old cost.
336 const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
337 N1Md.handleRemoveEdge(OldMMd, Transpose);
338 N2Md.handleRemoveEdge(OldMMd, !Transpose);
339
340 // And update now the metadata with the new cost.
341 const MatrixMetadata& MMd = NewCosts.getMetadata();
342 N1Md.handleAddEdge(MMd, Transpose);
343 N2Md.handleAddEdge(MMd, !Transpose);
344
345 // As the metadata may have changed with the update, the nodes may have
346 // become ConservativelyAllocatable or OptimallyReducible.
347 promote(N1Id, N1Md);
348 promote(N2Id, N2Md);
349 }
350
351private:
352 void promote(NodeId NId, NodeMetadata& NMd) {
353 if (G.getNodeDegree(NId) == 3) {
354 // This node is becoming optimally reducible.
355 moveToOptimallyReducibleNodes(NId);
356 } else if (NMd.getReductionState() ==
357 NodeMetadata::NotProvablyAllocatable &&
358 NMd.isConservativelyAllocatable()) {
359 // This node just became conservatively allocatable.
360 moveToConservativelyAllocatableNodes(NId);
361 }
362 }
363
364 void removeFromCurrentSet(NodeId NId) {
365 switch (G.getNodeMetadata(NId).getReductionState()) {
366 case NodeMetadata::Unprocessed: break;
367 case NodeMetadata::OptimallyReducible:
368 assert(OptimallyReducibleNodes.find(NId) !=
369 OptimallyReducibleNodes.end() &&
370 "Node not in optimally reducible set.");
371 OptimallyReducibleNodes.erase(NId);
372 break;
373 case NodeMetadata::ConservativelyAllocatable:
374 assert(ConservativelyAllocatableNodes.find(NId) !=
375 ConservativelyAllocatableNodes.end() &&
376 "Node not in conservatively allocatable set.");
377 ConservativelyAllocatableNodes.erase(NId);
378 break;
379 case NodeMetadata::NotProvablyAllocatable:
380 assert(NotProvablyAllocatableNodes.find(NId) !=
381 NotProvablyAllocatableNodes.end() &&
382 "Node not in not-provably-allocatable set.");
383 NotProvablyAllocatableNodes.erase(NId);
384 break;
385 }
386 }
387
388 void moveToOptimallyReducibleNodes(NodeId NId) {
389 removeFromCurrentSet(NId);
390 OptimallyReducibleNodes.insert(NId);
391 G.getNodeMetadata(NId).setReductionState(
392 NodeMetadata::OptimallyReducible);
393 }
394
395 void moveToConservativelyAllocatableNodes(NodeId NId) {
396 removeFromCurrentSet(NId);
397 ConservativelyAllocatableNodes.insert(NId);
398 G.getNodeMetadata(NId).setReductionState(
399 NodeMetadata::ConservativelyAllocatable);
400 }
401
402 void moveToNotProvablyAllocatableNodes(NodeId NId) {
403 removeFromCurrentSet(NId);
404 NotProvablyAllocatableNodes.insert(NId);
405 G.getNodeMetadata(NId).setReductionState(
406 NodeMetadata::NotProvablyAllocatable);
407 }
408
409 void setup() {
410 // Set up worklists.
411 for (auto NId : G.nodeIds()) {
412 if (G.getNodeDegree(NId) < 3)
413 moveToOptimallyReducibleNodes(NId);
414 else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
415 moveToConservativelyAllocatableNodes(NId);
416 else
417 moveToNotProvablyAllocatableNodes(NId);
418 }
419 }
420
421 // Compute a reduction order for the graph by iteratively applying PBQP
422 // reduction rules. Locally optimal rules are applied whenever possible (R0,
423 // R1, R2). If no locally-optimal rules apply then any conservatively
424 // allocatable node is reduced. Finally, if no conservatively allocatable
425 // node exists then the node with the lowest spill-cost:degree ratio is
426 // selected.
427 std::vector<GraphBase::NodeId> reduce() {
428 assert(!G.empty() && "Cannot reduce empty graph.");
429
430 using NodeId = GraphBase::NodeId;
431 std::vector<NodeId> NodeStack;
432
433 // Consume worklists.
434 while (true) {
435 if (!OptimallyReducibleNodes.empty()) {
436 NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
437 NodeId NId = *NItr;
438 OptimallyReducibleNodes.erase(NItr);
439 NodeStack.push_back(NId);
440 switch (G.getNodeDegree(NId)) {
441 case 0:
442 break;
443 case 1:
444 applyR1(G, NId);
445 break;
446 case 2:
447 applyR2(G, NId);
448 break;
449 default: llvm_unreachable("Not an optimally reducible node.");
450 }
451 } else if (!ConservativelyAllocatableNodes.empty()) {
452 // Conservatively allocatable nodes will never spill. For now just
453 // take the first node in the set and push it on the stack. When we
454 // start optimizing more heavily for register preferencing, it may
455 // be better to push nodes with lower 'expected' or worst-case
456 // register costs first (since early nodes are the most
457 // constrained).
458 NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
459 NodeId NId = *NItr;
460 ConservativelyAllocatableNodes.erase(NItr);
461 NodeStack.push_back(NId);
462 G.disconnectAllNeighborsFromNode(NId);
463 } else if (!NotProvablyAllocatableNodes.empty()) {
464 NodeSet::iterator NItr =
465 std::min_element(NotProvablyAllocatableNodes.begin(),
466 NotProvablyAllocatableNodes.end(),
467 SpillCostComparator(G));
468 NodeId NId = *NItr;
469 NotProvablyAllocatableNodes.erase(NItr);
470 NodeStack.push_back(NId);
471 G.disconnectAllNeighborsFromNode(NId);
472 } else
473 break;
474 }
475
476 return NodeStack;
477 }
478
479 class SpillCostComparator {
480 public:
481 SpillCostComparator(const Graph& G) : G(G) {}
482
483 bool operator()(NodeId N1Id, NodeId N2Id) {
484 PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
485 PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
486 if (N1SC == N2SC)
487 return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
488 return N1SC < N2SC;
489 }
490
491 private:
492 const Graph& G;
493 };
494
495 Graph& G;
496 using NodeSet = std::set<NodeId>;
497 NodeSet OptimallyReducibleNodes;
498 NodeSet ConservativelyAllocatableNodes;
499 NodeSet NotProvablyAllocatableNodes;
500};
501
502class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
503private:
504 using BaseT = PBQP::Graph<RegAllocSolverImpl>;
505
506public:
507 PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
508
509 /// Dump this graph to dbgs().
510 void dump() const;
511
512 /// Dump this graph to an output stream.
513 /// @param OS Output stream to print on.
514 void dump(raw_ostream &OS) const;
515
516 /// Print a representation of this graph in DOT format.
517 /// @param OS Output stream to print on.
518 void printDot(raw_ostream &OS) const;
519};
520
521inline Solution solve(PBQPRAGraph& G) {
522 if (G.empty())
523 return Solution();
524 RegAllocSolverImpl RegAllocSolver(G);
525 return RegAllocSolver.solve();
526}
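
A hedged sketch of how a machine-function pass might drive this API, modelled on RegAllocPBQP.cpp (the cost values are placeholders, the real builder steps are more involved, and addNode comes from PBQP/Graph.h; solveSketch is a hypothetical helper):

#include "llvm/CodeGen/RegAllocPBQP.h"
#include <utility>

using namespace llvm;
using namespace llvm::PBQP::RegAlloc;

PBQP::Solution solveSketch(MachineFunction &MF, LiveIntervals &LIS,
                           MachineBlockFrequencyInfo &MBFI) {
  PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
  // Option 0 is the spill option (getSpillOptionIdx()); the remaining
  // entries correspond to allowed physical registers. handleAddNode
  // insists on more than one option per node.
  PBQP::Vector NodeCosts(3, 0.0);
  auto NId = G.addNode(std::move(NodeCosts));
  (void)NId; // real code also records the vreg via setVReg/setAllowedRegs
  // Interference between two virtual registers would add edges via
  // G.addEdge(N1, N2, Matrix) with infinite costs on conflicting pairs.
  return solve(G);
}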
527
528} // end namespace RegAlloc
529} // end namespace PBQP
530
531/// Create a PBQP register allocator instance.
532FunctionPass *
533createPBQPRegisterAllocator(char *customPassID = nullptr);
534
535} // end namespace llvm
536
537#endif // LLVM_CODEGEN_REGALLOCPBQP_H