Bug Summary

File: include/llvm/ADT/FunctionExtras.h
Warning: line 143, column 5
Undefined or garbage value returned to caller
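The path below shows llvm::unique_function's move constructor returning without writing to 'StorageUnion.OutOfLineStorage.StoragePtr', after which that storage is returned to a caller. As a reference point, a minimal, hypothetical reproducer of this warning class (invented names, not the actual FunctionExtras.h code) looks like this:

#include <cstring>

// Hypothetical move-only wrapper with a small-buffer union. The move
// constructor writes the union only on the inline path, so the pointer
// member can be left uninitialized -- the defect class reported below.
struct Wrapper {
  union {
    void *OutOfLine;  // pointer to a heap-allocated callable
    char Inline[16];  // small callable stored in place
  } Storage;
  bool IsInline;

  Wrapper() : IsInline(true) {
    std::memset(Storage.Inline, 0, sizeof(Storage.Inline));
  }

  Wrapper(Wrapper &&Other) : IsInline(Other.IsInline) {
    if (IsInline)
      std::memcpy(Storage.Inline, Other.Storage.Inline,
                  sizeof(Storage.Inline));
    // BUG: on the !IsInline path, Storage.OutOfLine is never written.
  }

  // After the buggy move, this returns an undefined (garbage) value to the
  // caller -- the same diagnostic class as the warning above.
  void *pointer() const { return IsInline ? nullptr : Storage.OutOfLine; }
};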

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TUScheduler.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/tools/clang/tools/extra/clangd -I /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd -I /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/tools/clang/tools/extra/clangd -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp -faddrsig

/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp

1//===--- TUScheduler.cpp -----------------------------------------*-C++-*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9// For each file managed by TUScheduler, we create a single ASTWorker that
10// manages an AST for that file. All operations that modify or read the AST
11// are run asynchronously on a separate dedicated thread, in FIFO order.
12//
13// We start processing each update immediately after we receive it. If two or
14// more updates come in succession without reads in between, we attempt to drop
15// the older ones so as not to waste time building ASTs we don't need.
16//
17// The processing thread of the ASTWorker is also responsible for building the
18// preamble. However, unlike the AST, the same preamble can be read
19// concurrently, so we run each async preamble read on its own thread.
20//
21// To limit the concurrent load that clangd produces, we maintain a semaphore
22// that keeps more than a fixed number of threads from running concurrently.
23//
24// Rationale for cancelling updates.
25// LSP clients can send updates to clangd on each keystroke. Some files take
26// significant time to parse (e.g. a few seconds) and clangd can get starved by
27// the updates to those files. Therefore we try to process only the last update,
28// if possible.
29// Our current strategy to do that is the following:
30// - For each update we immediately schedule a rebuild of the AST.
31// - Rebuild of the AST checks whether it was cancelled before doing any actual
32// work. If it was, it skips the rebuild and only reports llvm::None to the
33// callback.
34// - When adding an update, we cancel the last update in the queue if it didn't
35// have any reads.
36// There are probably more optimal ways to do that; a sketch of the debounce
37// idea follows this comment. One approach we might take is the following:
38// - For each update we remember the pending inputs, but delay the rebuild of
39// the AST for some timeout.
40// - If subsequent updates come before the rebuild has started, we replace the
41// pending inputs and reset the timer.
42// - If any reads of the AST are scheduled, we start building the AST
43// immediately.
44
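A standalone sketch of the debounce-deadline computation described above (hypothetical Req type; not the actual scheduleLocked() below, which additionally handles diagnostics requests):

#include "llvm/ADT/Optional.h"
#include <chrono>
#include <deque>

namespace {
using steady_clock = std::chrono::steady_clock;

struct Req {
  steady_clock::time_point AddTime;
  bool IsUpdate; // updates may be debounced; reads may not
};

// Deadline for running the head of the queue: llvm::None means "wait for
// new requests", a past time point means "run now".
llvm::Optional<steady_clock::time_point>
nextDeadline(const std::deque<Req> &Requests,
             steady_clock::duration Debounce) {
  if (Requests.empty())
    return llvm::None; // nothing to do; sleep until a request arrives
  for (const Req &R : Requests)
    if (!R.IsUpdate)
      return steady_clock::now(); // a read is queued: never delay it
  // Only updates are queued: give newer updates Debounce time to obsolete
  // the head before we spend effort building its AST.
  return Requests.front().AddTime + Debounce;
}
} // namespace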
45#include "TUScheduler.h"
46#include "Logger.h"
47#include "Trace.h"
48#include "clang/Frontend/CompilerInvocation.h"
49#include "clang/Frontend/PCHContainerOperations.h"
50#include "llvm/ADT/ScopeExit.h"
51#include "llvm/Support/Errc.h"
52#include "llvm/Support/Path.h"
53#include <algorithm>
54#include <memory>
55#include <queue>
56#include <thread>
57
58namespace clang {
59namespace clangd {
60using std::chrono::steady_clock;
61
62namespace {
63class ASTWorker;
64}
65
66/// An LRU cache of idle ASTs.
67/// Because we want to limit the overall number of these we retain, the cache
68/// owns ASTs (and may evict them) while their workers are idle.
69/// Workers borrow ASTs when active, and return them when done.
70class TUScheduler::ASTCache {
71public:
72 using Key = const ASTWorker *;
73
74 ASTCache(unsigned MaxRetainedASTs) : MaxRetainedASTs(MaxRetainedASTs) {}
75
76 /// Returns the result of getUsedBytes() for the AST cached by \p K.
77 /// If no AST is cached, 0 is returned.
78 std::size_t getUsedBytes(Key K) {
79 std::lock_guard<std::mutex> Lock(Mut);
80 auto It = findByKey(K);
81 if (It == LRU.end() || !It->second)
82 return 0;
83 return It->second->getUsedBytes();
84 }
85
86 /// Store the value in the pool, possibly evicting the least recently used
87 /// AST. The value should not be in the pool when this function is called.
88 void put(Key K, std::unique_ptr<ParsedAST> V) {
89 std::unique_lock<std::mutex> Lock(Mut);
90 assert(findByKey(K) == LRU.end());
91
92 LRU.insert(LRU.begin(), {K, std::move(V)});
93 if (LRU.size() <= MaxRetainedASTs)
94 return;
95 // We're past the limit, remove the last element.
96 std::unique_ptr<ParsedAST> ForCleanup = std::move(LRU.back().second);
97 LRU.pop_back();
98 // Run the expensive destructor outside the lock.
99 Lock.unlock();
100 ForCleanup.reset();
101 }
102
103 /// Returns the cached value for \p K, or llvm::None if the value is not in
104 /// the cache anymore. If nullptr was cached for \p K, this function will
105 /// return a null unique_ptr wrapped into an optional.
106 llvm::Optional<std::unique_ptr<ParsedAST>> take(Key K) {
107 std::unique_lock<std::mutex> Lock(Mut);
108 auto Existing = findByKey(K);
109 if (Existing == LRU.end())
110 return llvm::None;
111 std::unique_ptr<ParsedAST> V = std::move(Existing->second);
112 LRU.erase(Existing);
113 // GCC 4.8 fails to compile `return V;`, as it tries to call the copy
114 // constructor of unique_ptr, so we call the move ctor explicitly to avoid
115 // this compile error.
116 return llvm::Optional<std::unique_ptr<ParsedAST>>(std::move(V));
117 }
118
119private:
120 using KVPair = std::pair<Key, std::unique_ptr<ParsedAST>>;
121
122 std::vector<KVPair>::iterator findByKey(Key K) {
123 return std::find_if(LRU.begin(), LRU.end(),
124 [K](const KVPair &P) { return P.first == K; });
125 }
126
127 std::mutex Mut;
128 unsigned MaxRetainedASTs;
129 /// Items sorted in LRU order, i.e. first item is the most recently accessed
130 /// one.
131 std::vector<KVPair> LRU; /* GUARDED_BY(Mut) */
132};
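A sketch of the borrow/return protocol this cache implements (illustrative only, not code from this file; error handling elided):

void borrowReturnSketch(TUScheduler::ASTCache &Cache, const ASTWorker *W,
                        std::unique_ptr<ParsedAST> Built) {
  Cache.put(W, std::move(Built)); // worker goes idle: the cache owns the AST
  // Worker becomes active again and borrows the AST back:
  if (llvm::Optional<std::unique_ptr<ParsedAST>> Borrowed = Cache.take(W)) {
    std::unique_ptr<ParsedAST> AST = std::move(*Borrowed);
    // ... use AST on the worker thread, then park it again when idle:
    Cache.put(W, std::move(AST));
  } else {
    // The cache evicted the AST while the worker was idle: rebuild it.
  }
}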
133
134namespace {
135class ASTWorkerHandle;
136
137/// Owns one instance of the AST, schedules updates and reads of it.
138/// Also responsible for building and providing access to the preamble.
139/// Each ASTWorker processes the async requests sent to it on a separate
140/// dedicated thread.
141/// The ASTWorker that manages the AST is shared by both the processing thread
142/// and the TUScheduler. The TUScheduler should discard an ASTWorker when
143/// remove() is called, but its thread may be busy and we don't want to block.
144/// So the workers are accessed via an ASTWorkerHandle. Destroying the handle
145/// signals the worker to exit its run loop and gives up shared ownership of the
146/// worker.
147class ASTWorker {
148 friend class ASTWorkerHandle;
149 ASTWorker(PathRef FileName, TUScheduler::ASTCache &LRUCache,
150 Semaphore &Barrier, bool RunSync,
151 steady_clock::duration UpdateDebounce,
152 std::shared_ptr<PCHContainerOperations> PCHs,
153 bool StorePreamblesInMemory,
154 PreambleParsedCallback PreambleCallback);
155
156public:
157 /// Create a new ASTWorker and return a handle to it.
158 /// The processing thread is spawned using \p Tasks. However, when \p Tasks
159 /// is null, all requests will be processed on the calling thread
160 /// synchronously instead. \p Barrier is acquired when processing each
161 /// request; it is used to limit the number of actively running threads.
162 static ASTWorkerHandle create(PathRef FileName,
163 TUScheduler::ASTCache &IdleASTs,
164 AsyncTaskRunner *Tasks, Semaphore &Barrier,
165 steady_clock::duration UpdateDebounce,
166 std::shared_ptr<PCHContainerOperations> PCHs,
167 bool StorePreamblesInMemory,
168 PreambleParsedCallback PreambleCallback);
169 ~ASTWorker();
170
171 void update(ParseInputs Inputs, WantDiagnostics,
172 llvm::unique_function<void(std::vector<Diag>)> OnUpdated);
173 void
174 runWithAST(llvm::StringRef Name,
175 llvm::unique_function<void(llvm::Expected<InputsAndAST>)> Action);
176 bool blockUntilIdle(Deadline Timeout) const;
177
178 std::shared_ptr<const PreambleData> getPossiblyStalePreamble() const;
179 /// Wait for the first build of the preamble to finish. The preamble itself
180 /// can be accessed via getPossiblyStalePreamble(). Note that this function
181 /// will return after an unsuccessful build of the preamble too, i.e. the
182 /// result of getPossiblyStalePreamble() can be null even after it returns.
183 void waitForFirstPreamble() const;
184
185 std::size_t getUsedBytes() const;
186 bool isASTCached() const;
187
188private:
189 // Must be called exactly once on the processing thread. Will return after
190 // stop() is called on a separate thread and all pending requests are
191 // processed.
192 void run();
193 /// Signal that run() should finish processing pending requests and exit.
194 void stop();
195 /// Adds a new task to the end of the request queue.
196 void startTask(llvm::StringRef Name, llvm::unique_function<void()> Task,
197 llvm::Optional<WantDiagnostics> UpdateType);
198 /// Determines the next action to perform.
199 /// All actions that should never run are discarded.
200 /// Returns a deadline for the next action. If it's expired, run now.
201 /// scheduleLocked() is called again at the deadline, or if requests arrive.
202 Deadline scheduleLocked();
203 /// Should the first task in the queue be skipped instead of run?
204 bool shouldSkipHeadLocked() const;
205
206 struct Request {
207 llvm::unique_function<void()> Action;
208 std::string Name;
209 steady_clock::time_point AddTime;
210 Context Ctx;
211 llvm::Optional<WantDiagnostics> UpdateType;
212 };
213
214 /// Handles retention of ASTs.
215 TUScheduler::ASTCache &IdleASTs;
216 const bool RunSync;
217 /// Time to wait after an update to see whether another update obsoletes it.
218 const steady_clock::duration UpdateDebounce;
220 /// File that the ASTWorker is responsible for.
220 const Path FileName;
221 /// Whether to keep the built preambles in memory or on disk.
222 const bool StorePreambleInMemory;
223 /// Callback, passed to the preamble builder.
224 const PreambleParsedCallback PreambleCallback;
225 /// Helper class required to build the ASTs.
226 const std::shared_ptr<PCHContainerOperations> PCHs;
227
228 Semaphore &Barrier;
229 /// Inputs, corresponding to the current state of AST.
230 ParseInputs FileInputs;
231
232 /// Guards members used by both TUScheduler and the worker thread.
233 mutable std::mutex Mutex;
234 std::shared_ptr<const PreambleData> LastBuiltPreamble; /* GUARDED_BY(Mutex) */
235 /// Becomes ready when the first preamble build finishes.
236 Notification PreambleWasBuilt;
237 /// Set to true to signal run() to finish processing.
238 bool Done; /* GUARDED_BY(Mutex) */
239 std::deque<Request> Requests; /* GUARDED_BY(Mutex) */
240 mutable std::condition_variable RequestsCV;
241};
242
243/// A smart-pointer-like class that points to an active ASTWorker.
244 /// Its destructor signals the underlying ASTWorker that no new requests will
245 /// be sent and that the processing loop may exit (after running all pending
246 /// requests).
247class ASTWorkerHandle {
248 friend class ASTWorker;
249 ASTWorkerHandle(std::shared_ptr<ASTWorker> Worker)
250 : Worker(std::move(Worker)) {
251 assert(this->Worker);
252 }
253
254public:
255 ASTWorkerHandle(const ASTWorkerHandle &) = delete;
256 ASTWorkerHandle &operator=(const ASTWorkerHandle &) = delete;
257 ASTWorkerHandle(ASTWorkerHandle &&) = default;
258 ASTWorkerHandle &operator=(ASTWorkerHandle &&) = default;
259
260 ~ASTWorkerHandle() {
261 if (Worker)
262 Worker->stop();
263 }
264
265 ASTWorker &operator*() {
266 assert(Worker && "Handle was moved from")(static_cast <bool> (Worker && "Handle was moved from"
) ? void (0) : __assert_fail ("Worker && \"Handle was moved from\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 266, __extension__ __PRETTY_FUNCTION__))
;
267 return *Worker;
268 }
269
270 ASTWorker *operator->() {
271 assert(Worker && "Handle was moved from")(static_cast <bool> (Worker && "Handle was moved from"
) ? void (0) : __assert_fail ("Worker && \"Handle was moved from\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 271, __extension__ __PRETTY_FUNCTION__))
;
272 return Worker.get();
273 }
274
275 /// Returns an owning reference to the underlying ASTWorker that can outlive
276 /// the ASTWorkerHandle. However, no new requests to an active ASTWorker can
277 /// be scheduled via the returned reference, i.e. only reads of the preamble
278 /// are possible.
279 std::shared_ptr<const ASTWorker> lock() { return Worker; }
280
281private:
282 std::shared_ptr<ASTWorker> Worker;
283};
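Separated from the clangd specifics, this is a move-only handle whose destructor signals a shared worker to stop, while lock() hands out read-only co-ownership. A generic sketch of the pattern (invented names; `stop()` is assumed on the worker type):

#include <memory>
#include <utility>

template <class Worker> class StopHandle {
  std::shared_ptr<Worker> W;

public:
  explicit StopHandle(std::shared_ptr<Worker> W) : W(std::move(W)) {}
  StopHandle(StopHandle &&) = default;
  StopHandle &operator=(StopHandle &&) = default;
  StopHandle(const StopHandle &) = delete;
  StopHandle &operator=(const StopHandle &) = delete;

  ~StopHandle() {
    if (W)
      W->stop(); // signal only; the worker still drains pending requests
  }
  // Read-only co-ownership that may outlive the handle (cf. lock() above).
  std::shared_ptr<const Worker> lock() const { return W; }
};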
284
285ASTWorkerHandle ASTWorker::create(PathRef FileName,
286 TUScheduler::ASTCache &IdleASTs,
287 AsyncTaskRunner *Tasks, Semaphore &Barrier,
288 steady_clock::duration UpdateDebounce,
289 std::shared_ptr<PCHContainerOperations> PCHs,
290 bool StorePreamblesInMemory,
291 PreambleParsedCallback PreambleCallback) {
292 std::shared_ptr<ASTWorker> Worker(new ASTWorker(
293 FileName, IdleASTs, Barrier, /*RunSync=*/!Tasks, UpdateDebounce,
294 std::move(PCHs), StorePreamblesInMemory, std::move(PreambleCallback)));
295 if (Tasks)
296 Tasks->runAsync("worker:" + llvm::sys::path::filename(FileName),
297 [Worker]() { Worker->run(); });
298
299 return ASTWorkerHandle(std::move(Worker));
300}
301
302ASTWorker::ASTWorker(PathRef FileName, TUScheduler::ASTCache &LRUCache,
303 Semaphore &Barrier, bool RunSync,
304 steady_clock::duration UpdateDebounce,
305 std::shared_ptr<PCHContainerOperations> PCHs,
306 bool StorePreamblesInMemory,
307 PreambleParsedCallback PreambleCallback)
308 : IdleASTs(LRUCache), RunSync(RunSync), UpdateDebounce(UpdateDebounce),
309 FileName(FileName), StorePreambleInMemory(StorePreamblesInMemory),
310 PreambleCallback(std::move(PreambleCallback)), PCHs(std::move(PCHs)),
311 Barrier(Barrier), Done(false) {}
312
313ASTWorker::~ASTWorker() {
314 // Make sure we remove the cached AST, if any.
315 IdleASTs.take(this);
316#ifndef NDEBUG
317 std::lock_guard<std::mutex> Lock(Mutex);
318 assert(Done && "handle was not destroyed")(static_cast <bool> (Done && "handle was not destroyed"
) ? void (0) : __assert_fail ("Done && \"handle was not destroyed\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 318, __extension__ __PRETTY_FUNCTION__))
;
319 assert(Requests.empty() && "unprocessed requests when destroying ASTWorker")(static_cast <bool> (Requests.empty() && "unprocessed requests when destroying ASTWorker"
) ? void (0) : __assert_fail ("Requests.empty() && \"unprocessed requests when destroying ASTWorker\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 319, __extension__ __PRETTY_FUNCTION__))
;
320#endif
321}
322
323void ASTWorker::update(
324 ParseInputs Inputs, WantDiagnostics WantDiags,
325 llvm::unique_function<void(std::vector<Diag>)> OnUpdated) {
326 auto Task = [=](decltype(OnUpdated) OnUpdated) mutable {
327 // Will be used to check if we can avoid rebuilding the AST.
328 bool InputsAreTheSame =
329 std::tie(FileInputs.CompileCommand, FileInputs.Contents) ==
330 std::tie(Inputs.CompileCommand, Inputs.Contents);
331
332 tooling::CompileCommand OldCommand = std::move(FileInputs.CompileCommand);
333 FileInputs = Inputs;
334 // Remove the old AST if it's still in cache.
335 IdleASTs.take(this);
336
337 log("Updating file {0} with command [{1}] {2}", FileName,
338 Inputs.CompileCommand.Directory,
339 llvm::join(Inputs.CompileCommand.CommandLine, " "));
340 // Rebuild the preamble and the AST.
341 std::unique_ptr<CompilerInvocation> Invocation =
342 buildCompilerInvocation(Inputs);
343 if (!Invocation) {
344 elog("Could not build CompilerInvocation for file {0}", FileName);
345 // Make sure anyone waiting for the preamble gets notified it could not
346 // be built.
347 PreambleWasBuilt.notify();
348 return;
349 }
350
351 std::shared_ptr<const PreambleData> OldPreamble =
352 getPossiblyStalePreamble();
353 std::shared_ptr<const PreambleData> NewPreamble =
354 buildPreamble(FileName, *Invocation, OldPreamble, OldCommand, Inputs,
355 PCHs, StorePreambleInMemory, PreambleCallback);
356
357 bool CanReuseAST = InputsAreTheSame && (OldPreamble == NewPreamble);
358 {
359 std::lock_guard<std::mutex> Lock(Mutex);
360 if (NewPreamble)
361 LastBuiltPreamble = NewPreamble;
362 }
363 // Before doing the expensive AST reparse, we want to release our reference
364 // to the old preamble, so it can be freed if there are no other references
365 // to it.
366 OldPreamble.reset();
367 PreambleWasBuilt.notify();
368
369 if (CanReuseAST) {
370 // Take a shortcut and don't build the AST, since neither the inputs nor
371 // the preamble have changed.
372 // Note that we do not report the diagnostics, since they should not have
373 // changed either. All the clients should handle the lack of an OnUpdated()
374 // call anyway, to handle an empty result from buildAST.
375 // FIXME(ibiryukov): the AST could actually change if non-preamble
376 // includes changed, but we choose to ignore it.
377 // FIXME(ibiryukov): should we refresh the cache in IdleASTs for the
378 // current file at this point?
379 log("Skipping rebuild of the AST for {0}, inputs are the same.",
380 FileName);
381 return;
382 }
383 // Build the AST for diagnostics.
384 llvm::Optional<ParsedAST> AST =
385 buildAST(FileName, std::move(Invocation), Inputs, NewPreamble, PCHs);
386 // We want to report the diagnostics even if this update was cancelled.
387 // It seems more useful than making the clients wait indefinitely if they
388 // spam us with updates.
389 if (WantDiags != WantDiagnostics::No && AST)
390 OnUpdated(AST->getDiagnostics());
391 // Stash the AST in the cache for further use.
392 IdleASTs.put(this,
393 AST ? llvm::make_unique<ParsedAST>(std::move(*AST)) : nullptr);
394 };
395
396 startTask("Update", Bind(Task, std::move(OnUpdated)), WantDiags);
397}
398
399void ASTWorker::runWithAST(
400 llvm::StringRef Name,
401 llvm::unique_function<void(llvm::Expected<InputsAndAST>)> Action) {
402 auto Task = [=](decltype(Action) Action) {
403 llvm::Optional<std::unique_ptr<ParsedAST>> AST = IdleASTs.take(this);
404 if (!AST) {
405 std::unique_ptr<CompilerInvocation> Invocation =
406 buildCompilerInvocation(FileInputs);
407 // Try rebuilding the AST.
408 llvm::Optional<ParsedAST> NewAST =
409 Invocation
410 ? buildAST(FileName,
411 llvm::make_unique<CompilerInvocation>(*Invocation),
412 FileInputs, getPossiblyStalePreamble(), PCHs)
413 : llvm::None;
414 AST = NewAST ? llvm::make_unique<ParsedAST>(std::move(*NewAST)) : nullptr;
415 }
416 // Make sure we put the AST back into the LRU cache.
417 auto _ = llvm::make_scope_exit(
418 [&AST, this]() { IdleASTs.put(this, std::move(*AST)); });
419 // Run the user-provided action.
420 if (!*AST)
421 return Action(llvm::make_error<llvm::StringError>(
422 "invalid AST", llvm::errc::invalid_argument));
423 Action(InputsAndAST{FileInputs, **AST});
424 };
425 startTask(Name, Bind(Task, std::move(Action)),
4. Calling 'Bind<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
426 /*UpdateType=*/llvm::None);
427}
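The llvm::make_scope_exit guard above is what guarantees the borrowed AST goes back into the cache on every exit path, including the early error return. The same idiom in isolation (a sketch):

#include "llvm/ADT/ScopeExit.h"

int withGuard(bool Fail) {
  int Borrowed = 42; // stands in for the borrowed AST
  auto PutBack = llvm::make_scope_exit([&] {
    Borrowed = 0; // runs on every return below, like IdleASTs.put() above
  });
  if (Fail)
    return -1; // the guard still fires here
  return Borrowed + 1;
}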
428
429std::shared_ptr<const PreambleData>
430ASTWorker::getPossiblyStalePreamble() const {
431 std::lock_guard<std::mutex> Lock(Mutex);
432 return LastBuiltPreamble;
433}
434
435void ASTWorker::waitForFirstPreamble() const {
436 PreambleWasBuilt.wait();
437}
438
439std::size_t ASTWorker::getUsedBytes() const {
440 // Note that we don't report the size of ASTs currently used for processing
441 // the in-flight requests. We use this information for debugging purposes
442 // only, so this should be fine.
443 std::size_t Result = IdleASTs.getUsedBytes(this);
444 if (auto Preamble = getPossiblyStalePreamble())
445 Result += Preamble->Preamble.getSize();
446 return Result;
447}
448
449bool ASTWorker::isASTCached() const { return IdleASTs.getUsedBytes(this) != 0; }
450
451void ASTWorker::stop() {
452 {
453 std::lock_guard<std::mutex> Lock(Mutex);
454 assert(!Done && "stop() called twice")(static_cast <bool> (!Done && "stop() called twice"
) ? void (0) : __assert_fail ("!Done && \"stop() called twice\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 454, __extension__ __PRETTY_FUNCTION__))
;
455 Done = true;
456 }
457 RequestsCV.notify_all();
458}
459
460void ASTWorker::startTask(llvm::StringRef Name,
461 llvm::unique_function<void()> Task,
462 llvm::Optional<WantDiagnostics> UpdateType) {
463 if (RunSync) {
464 assert(!Done && "running a task after stop()")(static_cast <bool> (!Done && "running a task after stop()"
) ? void (0) : __assert_fail ("!Done && \"running a task after stop()\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 464, __extension__ __PRETTY_FUNCTION__))
;
465 trace::Span Tracer(Name + ":" + llvm::sys::path::filename(FileName));
466 Task();
467 return;
468 }
469
470 {
471 std::lock_guard<std::mutex> Lock(Mutex);
472 assert(!Done && "running a task after stop()")(static_cast <bool> (!Done && "running a task after stop()"
) ? void (0) : __assert_fail ("!Done && \"running a task after stop()\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 472, __extension__ __PRETTY_FUNCTION__))
;
473 Requests.push_back({std::move(Task), Name, steady_clock::now(),
474 Context::current().clone(), UpdateType});
475 }
476 RequestsCV.notify_all();
477}
478
479void ASTWorker::run() {
480 while (true) {
481 Request Req;
482 {
483 std::unique_lock<std::mutex> Lock(Mutex);
484 for (auto Wait = scheduleLocked(); !Wait.expired();
485 Wait = scheduleLocked()) {
486 if (Done) {
487 if (Requests.empty())
488 return;
489 else // Even though Done is set, finish pending requests.
490 break; // However, skip delays to shutdown fast.
491 }
492
493 // Tracing: we have a next request, attribute this sleep to it.
494 Optional<WithContext> Ctx;
495 Optional<trace::Span> Tracer;
496 if (!Requests.empty()) {
497 Ctx.emplace(Requests.front().Ctx.clone());
498 Tracer.emplace("Debounce");
499 SPAN_ATTACH(*Tracer, "next_request", Requests.front().Name)do { if (auto *Args = (*Tracer).Args) (*Args)["next_request"]
= Requests.front().Name; } while (0)
;
500 if (!(Wait == Deadline::infinity()))
501 SPAN_ATTACH(*Tracer, "sleep_ms",do { if (auto *Args = (*Tracer).Args) (*Args)["sleep_ms"] = std
::chrono::duration_cast<std::chrono::milliseconds>( Wait
.time() - steady_clock::now()) .count(); } while (0)
502 std::chrono::duration_cast<std::chrono::milliseconds>(do { if (auto *Args = (*Tracer).Args) (*Args)["sleep_ms"] = std
::chrono::duration_cast<std::chrono::milliseconds>( Wait
.time() - steady_clock::now()) .count(); } while (0)
503 Wait.time() - steady_clock::now())do { if (auto *Args = (*Tracer).Args) (*Args)["sleep_ms"] = std
::chrono::duration_cast<std::chrono::milliseconds>( Wait
.time() - steady_clock::now()) .count(); } while (0)
504 .count())do { if (auto *Args = (*Tracer).Args) (*Args)["sleep_ms"] = std
::chrono::duration_cast<std::chrono::milliseconds>( Wait
.time() - steady_clock::now()) .count(); } while (0)
;
505 }
506
507 wait(Lock, RequestsCV, Wait);
508 }
509 Req = std::move(Requests.front());
510 // Leave it on the queue for now, so waiters don't see an empty queue.
511 } // unlock Mutex
512
513 {
514 std::lock_guard<Semaphore> BarrierLock(Barrier);
515 WithContext Guard(std::move(Req.Ctx));
516 trace::Span Tracer(Req.Name);
517 Req.Action();
518 }
519
520 {
521 std::lock_guard<std::mutex> Lock(Mutex);
522 Requests.pop_front();
523 }
524 RequestsCV.notify_all();
525 }
526}
527
528Deadline ASTWorker::scheduleLocked() {
529 if (Requests.empty())
530 return Deadline::infinity(); // Wait for new requests.
531 while (shouldSkipHeadLocked())
532 Requests.pop_front();
533 assert(!Requests.empty() && "skipped the whole queue")(static_cast <bool> (!Requests.empty() && "skipped the whole queue"
) ? void (0) : __assert_fail ("!Requests.empty() && \"skipped the whole queue\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 533, __extension__ __PRETTY_FUNCTION__))
;
534 // Some updates aren't dead yet, but never end up being used.
535 // e.g. the first keystroke is live until obsoleted by the second.
536 // We debounce "maybe-unused" writes, sleeping 500ms in case they become dead.
537 // But don't delay reads (including updates where diagnostics are needed).
538 for (const auto &R : Requests)
539 if (R.UpdateType == None || R.UpdateType == WantDiagnostics::Yes)
540 return Deadline::zero();
541 // Front request needs to be debounced, so determine when we're ready.
542 Deadline D(Requests.front().AddTime + UpdateDebounce);
543 return D;
544}
545
546// Returns true if Requests.front() is a dead update that can be skipped.
547bool ASTWorker::shouldSkipHeadLocked() const {
548 assert(!Requests.empty());
549 auto Next = Requests.begin();
550 auto UpdateType = Next->UpdateType;
551 if (!UpdateType) // Only skip updates.
552 return false;
553 ++Next;
554 // An update is live if its AST might still be read.
555 // That is, if it's not immediately followed by another update.
556 if (Next == Requests.end() || !Next->UpdateType)
557 return false;
558 // The other way an update can be live is if its diagnostics might be used.
559 switch (*UpdateType) {
560 case WantDiagnostics::Yes:
561 return false; // Always used.
562 case WantDiagnostics::No:
563 return true; // Always dead.
564 case WantDiagnostics::Auto:
565 // Used unless followed by an update that generates diagnostics.
566 for (; Next != Requests.end(); ++Next)
567 if (Next->UpdateType == WantDiagnostics::Yes ||
568 Next->UpdateType == WantDiagnostics::Auto)
569 return true; // Prefer later diagnostics.
570 return false;
571 }
572 llvm_unreachable("Unknown WantDiagnostics")::llvm::llvm_unreachable_internal("Unknown WantDiagnostics", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp"
, 572)
;
573}
574
575bool ASTWorker::blockUntilIdle(Deadline Timeout) const {
576 std::unique_lock<std::mutex> Lock(Mutex);
577 return wait(Lock, RequestsCV, Timeout, [&] { return Requests.empty(); });
578}
579
580} // namespace
581
582unsigned getDefaultAsyncThreadsCount() {
583 unsigned HardwareConcurrency = std::thread::hardware_concurrency();
584 // The C++ standard says that hardware_concurrency()
585 // may return 0; fall back to 1 worker thread in
586 // that case.
587 if (HardwareConcurrency == 0)
588 return 1;
589 return HardwareConcurrency;
590}
591
592struct TUScheduler::FileData {
593 /// Latest inputs, passed to TUScheduler::update().
594 std::string Contents;
595 tooling::CompileCommand Command;
596 ASTWorkerHandle Worker;
597};
598
599TUScheduler::TUScheduler(unsigned AsyncThreadsCount,
600 bool StorePreamblesInMemory,
601 PreambleParsedCallback PreambleCallback,
602 std::chrono::steady_clock::duration UpdateDebounce,
603 ASTRetentionPolicy RetentionPolicy)
604 : StorePreamblesInMemory(StorePreamblesInMemory),
605 PCHOps(std::make_shared<PCHContainerOperations>()),
606 PreambleCallback(std::move(PreambleCallback)), Barrier(AsyncThreadsCount),
607 IdleASTs(llvm::make_unique<ASTCache>(RetentionPolicy.MaxRetainedASTs)),
608 UpdateDebounce(UpdateDebounce) {
609 if (0 < AsyncThreadsCount) {
610 PreambleTasks.emplace();
611 WorkerThreads.emplace();
612 }
613}
614
615TUScheduler::~TUScheduler() {
616 // Notify all workers that they need to stop.
617 Files.clear();
618
619 // Wait for all in-flight tasks to finish.
620 if (PreambleTasks)
621 PreambleTasks->wait();
622 if (WorkerThreads)
623 WorkerThreads->wait();
624}
625
626bool TUScheduler::blockUntilIdle(Deadline D) const {
627 for (auto &File : Files)
628 if (!File.getValue()->Worker->blockUntilIdle(D))
629 return false;
630 if (PreambleTasks)
631 if (!PreambleTasks->wait(D))
632 return false;
633 return true;
634}
635
636void TUScheduler::update(
637 PathRef File, ParseInputs Inputs, WantDiagnostics WantDiags,
638 llvm::unique_function<void(std::vector<Diag>)> OnUpdated) {
639 std::unique_ptr<FileData> &FD = Files[File];
640 if (!FD) {
641 // Create a new worker to process the AST-related tasks.
642 ASTWorkerHandle Worker = ASTWorker::create(
643 File, *IdleASTs, WorkerThreads ? WorkerThreads.getPointer() : nullptr,
644 Barrier, UpdateDebounce, PCHOps, StorePreamblesInMemory,
645 PreambleCallback);
646 FD = std::unique_ptr<FileData>(new FileData{
647 Inputs.Contents, Inputs.CompileCommand, std::move(Worker)});
648 } else {
649 FD->Contents = Inputs.Contents;
650 FD->Command = Inputs.CompileCommand;
651 }
652 FD->Worker->update(std::move(Inputs), WantDiags, std::move(OnUpdated));
653}
654
655void TUScheduler::remove(PathRef File) {
656 bool Removed = Files.erase(File);
657 if (!Removed)
658 elog("Trying to remove file from TUScheduler that is not tracked: {0}",
659 File);
660}
661
662void TUScheduler::runWithAST(
663 llvm::StringRef Name, PathRef File,
664 llvm::unique_function<void(llvm::Expected<InputsAndAST>)> Action) {
665 auto It = Files.find(File);
666 if (It == Files.end()) {
1. Assuming the condition is false
2. Taking false branch
667 Action(llvm::make_error<llvm::StringError>(
668 "trying to get AST for non-added document",
669 llvm::errc::invalid_argument));
670 return;
671 }
672
673 It->second->Worker->runWithAST(Name, std::move(Action));
3. Calling 'ASTWorker::runWithAST'
674}
675
676void TUScheduler::runWithPreamble(
677 llvm::StringRef Name, PathRef File,
678 llvm::unique_function<void(llvm::Expected<InputsAndPreamble>)> Action) {
679 auto It = Files.find(File);
680 if (It == Files.end()) {
681 Action(llvm::make_error<llvm::StringError>(
682 "trying to get preamble for non-added document",
683 llvm::errc::invalid_argument));
684 return;
685 }
686
687 if (!PreambleTasks) {
688 trace::Span Tracer(Name);
689 SPAN_ATTACH(Tracer, "file", File)do { if (auto *Args = (Tracer).Args) (*Args)["file"] = File; }
while (0)
;
690 std::shared_ptr<const PreambleData> Preamble =
691 It->second->Worker->getPossiblyStalePreamble();
692 Action(InputsAndPreamble{It->second->Contents, It->second->Command,
693 Preamble.get()});
694 return;
695 }
696
697 std::shared_ptr<const ASTWorker> Worker = It->second->Worker.lock();
698 auto Task = [Worker, this](std::string Name, std::string File,
699 std::string Contents,
700 tooling::CompileCommand Command, Context Ctx,
701 decltype(Action) Action) mutable {
702 // We don't want to be running preamble actions before the preamble was
703 // built for the first time. This avoids the extra work of processing the
704 // preamble headers multiple times in parallel.
705 Worker->waitForFirstPreamble();
706
707 std::lock_guard<Semaphore> BarrierLock(Barrier);
708 WithContext Guard(std::move(Ctx));
709 trace::Span Tracer(Name);
710 SPAN_ATTACH(Tracer, "file", File)do { if (auto *Args = (Tracer).Args) (*Args)["file"] = File; }
while (0)
;
711 std::shared_ptr<const PreambleData> Preamble =
712 Worker->getPossiblyStalePreamble();
713 Action(InputsAndPreamble{Contents, Command, Preamble.get()});
714 };
715
716 PreambleTasks->runAsync("task:" + llvm::sys::path::filename(File),
717 Bind(Task, std::string(Name), std::string(File),
718 It->second->Contents, It->second->Command,
719 Context::current().clone(), std::move(Action)));
720}
721
722std::vector<std::pair<Path, std::size_t>>
723TUScheduler::getUsedBytesPerFile() const {
724 std::vector<std::pair<Path, std::size_t>> Result;
725 Result.reserve(Files.size());
726 for (auto &&PathAndFile : Files)
727 Result.push_back(
728 {PathAndFile.first(), PathAndFile.second->Worker->getUsedBytes()});
729 return Result;
730}
731
732std::vector<Path> TUScheduler::getFilesWithCachedAST() const {
733 std::vector<Path> Result;
734 for (auto &&PathAndFile : Files) {
735 if (!PathAndFile.second->Worker->isASTCached())
736 continue;
737 Result.push_back(PathAndFile.first());
738 }
739 return Result;
740}
741
742} // namespace clangd
743} // namespace clang

/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/Function.h

1//===--- Function.h - Utility callable wrappers -----------------*- C++-*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file provides utilities for callable objects.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_FUNCTION_H
15#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_FUNCTION_H
16
17#include "llvm/ADT/FunctionExtras.h"
18#include "llvm/Support/Error.h"
19#include <tuple>
20#include <utility>
21
22namespace clang {
23namespace clangd {
24
25/// A Callback<T> is a void function that accepts Expected<T>.
26/// This is accepted by ClangdServer functions that logically return T.
27template <typename T>
28using Callback = llvm::unique_function<void(llvm::Expected<T>)>;
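A caller-side sketch of providing such a callback (hypothetical function name; both arms of llvm::Expected must be handled, since dropping an error aborts in debug builds):

inline Callback<int> makeExampleCallback() {
  return [](llvm::Expected<int> R) {
    if (!R) {
      llvm::consumeError(R.takeError()); // or log the failure
      return;
    }
    int Value = *R; // success arm
    (void)Value;
  };
}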
29
30/// Stores a callable object (Func) and arguments (Args) and allows calling the
31/// callable with the provided arguments later using `operator()`. The arguments
32/// are std::forward'ed into the callable in the body of `operator()`. Therefore,
33/// `operator()` can only be called once, as some of the arguments could be
34/// std::move'd into the callable on the first call.
35template <class Func, class... Args> struct ForwardBinder {
36 using Tuple = std::tuple<typename std::decay<Func>::type,
37 typename std::decay<Args>::type...>;
38 Tuple FuncWithArguments;
39#ifndef NDEBUG
40 bool WasCalled = false;
41#endif
42
43public:
44 ForwardBinder(Tuple FuncWithArguments)
45 : FuncWithArguments(std::move(FuncWithArguments)) {}
46
47private:
48 template <std::size_t... Indexes, class... RestArgs>
49 auto CallImpl(llvm::integer_sequence<std::size_t, Indexes...> Seq,
50 RestArgs &&... Rest)
51 -> decltype(std::get<0>(this->FuncWithArguments)(
52 std::forward<Args>(std::get<Indexes + 1>(this->FuncWithArguments))...,
53 std::forward<RestArgs>(Rest)...)) {
54 return std::get<0>(this->FuncWithArguments)(
55 std::forward<Args>(std::get<Indexes + 1>(this->FuncWithArguments))...,
56 std::forward<RestArgs>(Rest)...);
57 }
58
59public:
60 template <class... RestArgs>
61 auto operator()(RestArgs &&... Rest)
62 -> decltype(this->CallImpl(llvm::index_sequence_for<Args...>(),
63 std::forward<RestArgs>(Rest)...)) {
64
65#ifndef NDEBUG
66 assert(!WasCalled && "Can only call result of Bind once.")(static_cast <bool> (!WasCalled && "Can only call result of Bind once."
) ? void (0) : __assert_fail ("!WasCalled && \"Can only call result of Bind once.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/Function.h"
, 66, __extension__ __PRETTY_FUNCTION__))
;
67 WasCalled = true;
68#endif
69 return CallImpl(llvm::index_sequence_for<Args...>(),
70 std::forward<RestArgs>(Rest)...);
71 }
72};
73
74/// Creates an object that stores a callable (\p F) and the first arguments to
75/// the callable (\p As) and allows calling \p F with \p As at a later point.
76/// Similar to std::bind, but also works with move-only \p F and \p As.
77///
78/// The returned object must be called no more than once, as \p As are
79/// std::forward'ed (and can therefore be moved) into \p F during the call.
80template <class Func, class... Args>
81ForwardBinder<Func, Args...> Bind(Func F, Args &&... As) {
82 return ForwardBinder<Func, Args...>(
83 std::make_tuple(std::forward<Func>(F), std::forward<Args>(As)...));
5. Calling 'make_tuple<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
22. Returning from 'make_tuple<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
23. Calling implicit destructor for 'tuple<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
24. Calling implicit destructor for '_Tuple_impl<0, (lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
25. Calling implicit destructor for '_Tuple_impl<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
26. Calling implicit destructor for '_Head_base<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>, false>'
27. Calling '~unique_function'
84}
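A usage sketch of Bind (hypothetical callable): the stored arguments are moved into \p F on the first call, so calling the result twice trips the assertion above.

#include <memory> // for the sketch below

inline int bindExample() {
  std::unique_ptr<int> P(new int(7));
  auto Task = Bind([](std::unique_ptr<int> Q, int X) { return *Q + X; },
                   std::move(P));
  int R = Task(35); // Q is moved into the lambda here; R == 42
  // Task(35);      // would fire the "call result of Bind once" assertion
  return R;
}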
85
86} // namespace clangd
87} // namespace clang
88
89#endif

/usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/tuple

1// <tuple> -*- C++ -*-
2
3// Copyright (C) 2007-2018 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/tuple
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_TUPLE
30#define _GLIBCXX_TUPLE 1
31
32#pragma GCC system_header
33
34#if __cplusplus < 201103L
35# include <bits/c++0x_warning.h>
36#else
37
38#include <utility>
39#include <array>
40#include <bits/uses_allocator.h>
41#include <bits/invoke.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @addtogroup utilities
49 * @{
50 */
51
52 template<typename... _Elements>
53 class tuple;
54
55 template<typename _Tp>
56 struct __is_empty_non_tuple : is_empty<_Tp> { };
57
58 // Using EBO for elements that are tuples causes ambiguous base errors.
59 template<typename _El0, typename... _El>
60 struct __is_empty_non_tuple<tuple<_El0, _El...>> : false_type { };
61
62 // Use the Empty Base-class Optimization for empty, non-final types.
63 template<typename _Tp>
64 using __empty_not_final
65 = typename conditional<__is_final(_Tp), false_type,
66 __is_empty_non_tuple<_Tp>>::type;
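A standalone demonstration of the size effect the empty-base optimization gives (invented names; sizes are typical for common ABIs, not guaranteed):

#include <cstdio>

struct Empty {}; // stateless, non-final

struct AsMember { Empty E; int X; }; // Empty still occupies at least 1 byte
struct AsBase : Empty { int X; };    // the empty base subobject can be elided

int main() {
  // Typically prints "8 4": the empty base costs no storage.
  std::printf("%zu %zu\n", sizeof(AsMember), sizeof(AsBase));
}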
67
68 template<std::size_t _Idx, typename _Head,
69 bool = __empty_not_final<_Head>::value>
70 struct _Head_base;
71
72 template<std::size_t _Idx, typename _Head>
73 struct _Head_base<_Idx, _Head, true>
74 : public _Head
75 {
76 constexpr _Head_base()
77 : _Head() { }
78
79 constexpr _Head_base(const _Head& __h)
80 : _Head(__h) { }
81
82 constexpr _Head_base(const _Head_base&) = default;
83 constexpr _Head_base(_Head_base&&) = default;
84
85 template<typename _UHead>
86 constexpr _Head_base(_UHead&& __h)
87 : _Head(std::forward<_UHead>(__h)) { }
88
89 _Head_base(allocator_arg_t, __uses_alloc0)
90 : _Head() { }
91
92 template<typename _Alloc>
93 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
94 : _Head(allocator_arg, *__a._M_a) { }
95
96 template<typename _Alloc>
97 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
98 : _Head(*__a._M_a) { }
99
100 template<typename _UHead>
101 _Head_base(__uses_alloc0, _UHead&& __uhead)
102 : _Head(std::forward<_UHead>(__uhead)) { }
103
104 template<typename _Alloc, typename _UHead>
105 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
106 : _Head(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead)) { }
107
108 template<typename _Alloc, typename _UHead>
109 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
110 : _Head(std::forward<_UHead>(__uhead), *__a._M_a) { }
111
112 static constexpr _Head&
113 _M_head(_Head_base& __b) noexcept { return __b; }
114
115 static constexpr const _Head&
116 _M_head(const _Head_base& __b) noexcept { return __b; }
117 };
118
119 template<std::size_t _Idx, typename _Head>
120 struct _Head_base<_Idx, _Head, false>
121 {
122 constexpr _Head_base()
123 : _M_head_impl() { }
124
125 constexpr _Head_base(const _Head& __h)
126 : _M_head_impl(__h) { }
127
128 constexpr _Head_base(const _Head_base&) = default;
129 constexpr _Head_base(_Head_base&&) = default;
130
131 template<typename _UHead>
132 constexpr _Head_base(_UHead&& __h)
133 : _M_head_impl(std::forward<_UHead>(__h)) { }
10. Calling move constructor for 'unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>'
13. Returning from move constructor for 'unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>'
14. Returning without writing to 'this->_M_head_impl.StorageUnion.OutOfLineStorage.StoragePtr'
134
135 _Head_base(allocator_arg_t, __uses_alloc0)
136 : _M_head_impl() { }
137
138 template<typename _Alloc>
139 _Head_base(allocator_arg_t, __uses_alloc1<_Alloc> __a)
140 : _M_head_impl(allocator_arg, *__a._M_a) { }
141
142 template<typename _Alloc>
143 _Head_base(allocator_arg_t, __uses_alloc2<_Alloc> __a)
144 : _M_head_impl(*__a._M_a) { }
145
146 template<typename _UHead>
147 _Head_base(__uses_alloc0, _UHead&& __uhead)
148 : _M_head_impl(std::forward<_UHead>(__uhead)) { }
149
150 template<typename _Alloc, typename _UHead>
151 _Head_base(__uses_alloc1<_Alloc> __a, _UHead&& __uhead)
152 : _M_head_impl(allocator_arg, *__a._M_a, std::forward<_UHead>(__uhead))
153 { }
154
155 template<typename _Alloc, typename _UHead>
156 _Head_base(__uses_alloc2<_Alloc> __a, _UHead&& __uhead)
157 : _M_head_impl(std::forward<_UHead>(__uhead), *__a._M_a) { }
158
159 static constexpr _Head&
160 _M_head(_Head_base& __b) noexcept { return __b._M_head_impl; }
161
162 static constexpr const _Head&
163 _M_head(const _Head_base& __b) noexcept { return __b._M_head_impl; }
164
165 _Head _M_head_impl;
166 };
167
168 /**
169 * Contains the actual implementation of the @c tuple template, stored
170 * as a recursive inheritance hierarchy from the first element (most
171 * derived class) to the last (least derived class). The @c Idx
172 * parameter gives the 0-based index of the element stored at this
173 * point in the hierarchy; we use it to implement a constant-time
174 * get() operation.
175 */
176 template<std::size_t _Idx, typename... _Elements>
177 struct _Tuple_impl;
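The recursive-inheritance scheme described above, reduced to a standalone sketch (simplified: no EBO, no allocator support; invented names):

#include <cstddef>
#include <cstdio>

// Minimal recursive tuple: element Idx lives in the layer TupleImpl<Idx, ...>.
template <std::size_t Idx, class... Ts> struct TupleImpl;

template <std::size_t Idx> struct TupleImpl<Idx> {}; // basis case

template <std::size_t Idx, class Head, class... Tail>
struct TupleImpl<Idx, Head, Tail...> : TupleImpl<Idx + 1, Tail...> {
  Head Value;
  TupleImpl(Head H, Tail... T)
      : TupleImpl<Idx + 1, Tail...>(T...), Value(H) {}
};

// get<I>: template argument deduction against the unique base
// TupleImpl<I, ...> selects the right layer -- the "constant-time get()"
// the comment above refers to.
template <std::size_t I, class Head, class... Tail>
Head &get(TupleImpl<I, Head, Tail...> &T) {
  return T.Value;
}

int main() {
  TupleImpl<0, int, double> T(1, 2.5);
  std::printf("%d %g\n", get<0>(T), get<1>(T)); // prints: 1 2.5
}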
178
179 /**
180 * Recursive tuple implementation. Here we store the @c Head element
181 * and derive from a @c Tuple_impl containing the remaining elements
182 * (which contains the @c Tail).
183 */
184 template<std::size_t _Idx, typename _Head, typename... _Tail>
185 struct _Tuple_impl<_Idx, _Head, _Tail...>
186 : public _Tuple_impl<_Idx + 1, _Tail...>,
187 private _Head_base<_Idx, _Head>
188 {
189 template<std::size_t, typename...> friend class _Tuple_impl;
190
191 typedef _Tuple_impl<_Idx + 1, _Tail...> _Inherited;
192 typedef _Head_base<_Idx, _Head> _Base;
193
194 static constexpr _Head&
195 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
196
197 static constexpr const _Head&
198 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
199
200 static constexpr _Inherited&
201 _M_tail(_Tuple_impl& __t) noexcept { return __t; }
202
203 static constexpr const _Inherited&
204 _M_tail(const _Tuple_impl& __t) noexcept { return __t; }
205
206 constexpr _Tuple_impl()
207 : _Inherited(), _Base() { }
208
209 explicit
210 constexpr _Tuple_impl(const _Head& __head, const _Tail&... __tail)
211 : _Inherited(__tail...), _Base(__head) { }
212
213 template<typename _UHead, typename... _UTail, typename = typename
214 enable_if<sizeof...(_Tail) == sizeof...(_UTail)>::type>
215 explicit
216 constexpr _Tuple_impl(_UHead&& __head, _UTail&&... __tail)
217 : _Inherited(std::forward<_UTail>(__tail)...),
8. Calling constructor for '_Tuple_impl<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
17. Returning from constructor for '_Tuple_impl<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
218 _Base(std::forward<_UHead>(__head)) { }
18. Returning without writing to 'this->_M_head_impl.StorageUnion.OutOfLineStorage.StoragePtr'
219
220 constexpr _Tuple_impl(const _Tuple_impl&) = default;
221
222 constexpr
223 _Tuple_impl(_Tuple_impl&& __in)
224 noexcept(__and_<is_nothrow_move_constructible<_Head>,
225 is_nothrow_move_constructible<_Inherited>>::value)
226 : _Inherited(std::move(_M_tail(__in))),
227 _Base(std::forward<_Head>(_M_head(__in))) { }
228
229 template<typename... _UElements>
230 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UElements...>& __in)
231 : _Inherited(_Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
232 _Base(_Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
233
234 template<typename _UHead, typename... _UTails>
235 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
236 : _Inherited(std::move
237 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
238 _Base(std::forward<_UHead>
239 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
240
241 template<typename _Alloc>
242 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
243 : _Inherited(__tag, __a),
244 _Base(__tag, __use_alloc<_Head>(__a)) { }
245
246 template<typename _Alloc>
247 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
248 const _Head& __head, const _Tail&... __tail)
249 : _Inherited(__tag, __a, __tail...),
250 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
251
252 template<typename _Alloc, typename _UHead, typename... _UTail,
253 typename = typename enable_if<sizeof...(_Tail)
254 == sizeof...(_UTail)>::type>
255 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
256 _UHead&& __head, _UTail&&... __tail)
257 : _Inherited(__tag, __a, std::forward<_UTail>(__tail)...),
258 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
259 std::forward<_UHead>(__head)) { }
260
261 template<typename _Alloc>
262 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
263 const _Tuple_impl& __in)
264 : _Inherited(__tag, __a, _M_tail(__in)),
265 _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
266
267 template<typename _Alloc>
268 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
269 _Tuple_impl&& __in)
270 : _Inherited(__tag, __a, std::move(_M_tail(__in))),
271 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
272 std::forward<_Head>(_M_head(__in))) { }
273
274 template<typename _Alloc, typename... _UElements>
275 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
276 const _Tuple_impl<_Idx, _UElements...>& __in)
277 : _Inherited(__tag, __a,
278 _Tuple_impl<_Idx, _UElements...>::_M_tail(__in)),
279 _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
280 _Tuple_impl<_Idx, _UElements...>::_M_head(__in)) { }
281
282 template<typename _Alloc, typename _UHead, typename... _UTails>
283 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
284 _Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
285 : _Inherited(__tag, __a, std::move
286 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in))),
287 _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
288 std::forward<_UHead>
289 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in))) { }
290
291 _Tuple_impl&
292 operator=(const _Tuple_impl& __in)
293 {
294 _M_head(*this) = _M_head(__in);
295 _M_tail(*this) = _M_tail(__in);
296 return *this;
297 }
298
299 _Tuple_impl&
300 operator=(_Tuple_impl&& __in)
301 noexcept(__and_<is_nothrow_move_assignable<_Head>,
302 is_nothrow_move_assignable<_Inherited>>::value)
303 {
304 _M_head(*this) = std::forward<_Head>(_M_head(__in));
305 _M_tail(*this) = std::move(_M_tail(__in));
306 return *this;
307 }
308
309 template<typename... _UElements>
310 _Tuple_impl&
311 operator=(const _Tuple_impl<_Idx, _UElements...>& __in)
312 {
313 _M_head(*this) = _Tuple_impl<_Idx, _UElements...>::_M_head(__in);
314 _M_tail(*this) = _Tuple_impl<_Idx, _UElements...>::_M_tail(__in);
315 return *this;
316 }
317
318 template<typename _UHead, typename... _UTails>
319 _Tuple_impl&
320 operator=(_Tuple_impl<_Idx, _UHead, _UTails...>&& __in)
321 {
322 _M_head(*this) = std::forward<_UHead>
323 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_head(__in));
324 _M_tail(*this) = std::move
325 (_Tuple_impl<_Idx, _UHead, _UTails...>::_M_tail(__in));
326 return *this;
327 }
328
329 protected:
330 void
331 _M_swap(_Tuple_impl& __in)
332 noexcept(__is_nothrow_swappable<_Head>::value
333 && noexcept(_M_tail(__in)._M_swap(_M_tail(__in))))
334 {
335 using std::swap;
336 swap(_M_head(*this), _M_head(__in));
337 _Inherited::_M_swap(_M_tail(__in));
338 }
339 };
340
341 // Basis case of inheritance recursion.
342 template<std::size_t _Idx, typename _Head>
343 struct _Tuple_impl<_Idx, _Head>
344 : private _Head_base<_Idx, _Head>
345 {
346 template<std::size_t, typename...> friend class _Tuple_impl;
347
348 typedef _Head_base<_Idx, _Head> _Base;
349
350 static constexpr _Head&
351 _M_head(_Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
352
353 static constexpr const _Head&
354 _M_head(const _Tuple_impl& __t) noexcept { return _Base::_M_head(__t); }
355
356 constexpr _Tuple_impl()
357 : _Base() { }
358
359 explicit
360 constexpr _Tuple_impl(const _Head& __head)
361 : _Base(__head) { }
362
363 template<typename _UHead>
364 explicit
365 constexpr _Tuple_impl(_UHead&& __head)
366 : _Base(std::forward<_UHead>(__head)) { }
9. Calling constructor for '_Head_base<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>, false>'
15. Returning from constructor for '_Head_base<1, llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>, false>'
16. Returning without writing to 'this->_M_head_impl.StorageUnion.OutOfLineStorage.StoragePtr'
367
368 constexpr _Tuple_impl(const _Tuple_impl&) = default;
369
370 constexpr
371 _Tuple_impl(_Tuple_impl&& __in)
372 noexcept(is_nothrow_move_constructible<_Head>::value)
373 : _Base(std::forward<_Head>(_M_head(__in))) { }
374
375 template<typename _UHead>
376 constexpr _Tuple_impl(const _Tuple_impl<_Idx, _UHead>& __in)
377 : _Base(_Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
378
379 template<typename _UHead>
380 constexpr _Tuple_impl(_Tuple_impl<_Idx, _UHead>&& __in)
381 : _Base(std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
382 { }
383
384 template<typename _Alloc>
385 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a)
386 : _Base(__tag, __use_alloc<_Head>(__a)) { }
387
388 template<typename _Alloc>
389 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
390 const _Head& __head)
391 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), __head) { }
392
393 template<typename _Alloc, typename _UHead>
394 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
395 _UHead&& __head)
396 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
397 std::forward<_UHead>(__head)) { }
398
399 template<typename _Alloc>
400 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
401 const _Tuple_impl& __in)
402 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a), _M_head(__in)) { }
403
404 template<typename _Alloc>
405 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
406 _Tuple_impl&& __in)
407 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
408 std::forward<_Head>(_M_head(__in))) { }
409
410 template<typename _Alloc, typename _UHead>
411 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
412 const _Tuple_impl<_Idx, _UHead>& __in)
413 : _Base(__use_alloc<_Head, _Alloc, _Head>(__a),
414 _Tuple_impl<_Idx, _UHead>::_M_head(__in)) { }
415
416 template<typename _Alloc, typename _UHead>
417 _Tuple_impl(allocator_arg_t __tag, const _Alloc& __a,
418 _Tuple_impl<_Idx, _UHead>&& __in)
419 : _Base(__use_alloc<_Head, _Alloc, _UHead>(__a),
420 std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in)))
421 { }
422
423 _Tuple_impl&
424 operator=(const _Tuple_impl& __in)
425 {
426 _M_head(*this) = _M_head(__in);
427 return *this;
428 }
429
430 _Tuple_impl&
431 operator=(_Tuple_impl&& __in)
432 noexcept(is_nothrow_move_assignable<_Head>::value)
433 {
434 _M_head(*this) = std::forward<_Head>(_M_head(__in));
435 return *this;
436 }
437
438 template<typename _UHead>
439 _Tuple_impl&
440 operator=(const _Tuple_impl<_Idx, _UHead>& __in)
441 {
442 _M_head(*this) = _Tuple_impl<_Idx, _UHead>::_M_head(__in);
443 return *this;
444 }
445
446 template<typename _UHead>
447 _Tuple_impl&
448 operator=(_Tuple_impl<_Idx, _UHead>&& __in)
449 {
450 _M_head(*this)
451 = std::forward<_UHead>(_Tuple_impl<_Idx, _UHead>::_M_head(__in));
452 return *this;
453 }
454
455 protected:
456 void
457 _M_swap(_Tuple_impl& __in)
458 noexcept(__is_nothrow_swappable<_Head>::value)
459 {
460 using std::swap;
461 swap(_M_head(*this), _M_head(__in));
462 }
463 };
464
465 // Concept utility functions, reused in conditionally-explicit
466 // constructors.
467 template<bool, typename... _Elements>
468 struct _TC
469 {
470 template<typename... _UElements>
471 static constexpr bool _ConstructibleTuple()
472 {
473 return __and_<is_constructible<_Elements, const _UElements&>...>::value;
474 }
475
476 template<typename... _UElements>
477 static constexpr bool _ImplicitlyConvertibleTuple()
478 {
479 return __and_<is_convertible<const _UElements&, _Elements>...>::value;
480 }
481
482 template<typename... _UElements>
483 static constexpr bool _MoveConstructibleTuple()
484 {
485 return __and_<is_constructible<_Elements, _UElements&&>...>::value;
486 }
487
488 template<typename... _UElements>
489 static constexpr bool _ImplicitlyMoveConvertibleTuple()
490 {
491 return __and_<is_convertible<_UElements&&, _Elements>...>::value;
492 }
493
494 template<typename _SrcTuple>
495 static constexpr bool _NonNestedTuple()
496 {
497 return __and_<__not_<is_same<tuple<_Elements...>,
498 typename remove_cv<
499 typename remove_reference<_SrcTuple>::type
500 >::type>>,
501 __not_<is_convertible<_SrcTuple, _Elements...>>,
502 __not_<is_constructible<_Elements..., _SrcTuple>>
503 >::value;
504 }
505 template<typename... _UElements>
506 static constexpr bool _NotSameTuple()
507 {
508 return __not_<is_same<tuple<_Elements...>,
509 typename remove_const<
510 typename remove_reference<_UElements...>::type
511 >::type>>::value;
512 }
513 };
514
515 template<typename... _Elements>
516 struct _TC<false, _Elements...>
517 {
518 template<typename... _UElements>
519 static constexpr bool _ConstructibleTuple()
520 {
521 return false;
522 }
523
524 template<typename... _UElements>
525 static constexpr bool _ImplicitlyConvertibleTuple()
526 {
527 return false;
528 }
529
530 template<typename... _UElements>
531 static constexpr bool _MoveConstructibleTuple()
532 {
533 return false;
534 }
535
536 template<typename... _UElements>
537 static constexpr bool _ImplicitlyMoveConvertibleTuple()
538 {
539 return false;
540 }
541
542 template<typename... _UElements>
543 static constexpr bool _NonNestedTuple()
544 {
545 return true;
546 }
547 template<typename... _UElements>
548 static constexpr bool _NotSameTuple()
549 {
550 return true;
551 }
552 };
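
The _TC primary template and its false specialization above drive libstdc++'s conditionally-explicit constructor idiom: each tuple constructor is written twice, once with an enable_if'd bool template parameter defaulted to true (the implicit overload) and once defaulted to false with the convertibility test negated and explicit added, so exactly one overload participates for any given arguments. A minimal sketch of the same idiom, with illustrative names that are not part of libstdc++:

    #include <type_traits>

    template <typename T>
    struct Wrapper {
      // Implicit overload: enabled only when U converts implicitly to T.
      template <typename U = T,
                typename std::enable_if<std::is_convertible<U, T>::value,
                                        bool>::type = true>
      Wrapper(const U &u) : value(u) {}

      // Explicit overload: enabled when T is constructible from U but the
      // conversion is not implicit.
      template <typename U = T,
                typename std::enable_if<
                    std::is_constructible<T, const U &>::value &&
                        !std::is_convertible<U, T>::value,
                    bool>::type = false>
      explicit Wrapper(const U &u) : value(u) {}

      T value;
    };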
553
554 /// Primary class template, tuple
555 template<typename... _Elements>
556 class tuple : public _Tuple_impl<0, _Elements...>
557 {
558 typedef _Tuple_impl<0, _Elements...> _Inherited;
559
560 // Used for constraining the default constructor so
561 // that it becomes dependent on the constraints.
562 template<typename _Dummy>
563 struct _TC2
564 {
565 static constexpr bool _DefaultConstructibleTuple()
566 {
567 return __and_<is_default_constructible<_Elements>...>::value;
568 }
569 static constexpr bool _ImplicitlyDefaultConstructibleTuple()
570 {
571 return __and_<__is_implicitly_default_constructible<_Elements>...>
572 ::value;
573 }
574 };
575
576 public:
577 template<typename _Dummy = void,
578 typename enable_if<_TC2<_Dummy>::
579 _ImplicitlyDefaultConstructibleTuple(),
580 bool>::type = true>
581 constexpr tuple()
582 : _Inherited() { }
583
584 template<typename _Dummy = void,
585 typename enable_if<_TC2<_Dummy>::
586 _DefaultConstructibleTuple()
587 &&
588 !_TC2<_Dummy>::
589 _ImplicitlyDefaultConstructibleTuple(),
590 bool>::type = false>
591 explicit constexpr tuple()
592 : _Inherited() { }
593
594 // Shortcut for the cases where constructors taking _Elements...
595 // need to be constrained.
596 template<typename _Dummy> using _TCC =
597 _TC<is_same<_Dummy, void>::value,
598 _Elements...>;
599
600 template<typename _Dummy = void,
601 typename enable_if<
602 _TCC<_Dummy>::template
603 _ConstructibleTuple<_Elements...>()
604 && _TCC<_Dummy>::template
605 _ImplicitlyConvertibleTuple<_Elements...>()
606 && (sizeof...(_Elements) >= 1),
607 bool>::type=true>
608 constexpr tuple(const _Elements&... __elements)
609 : _Inherited(__elements...) { }
610
611 template<typename _Dummy = void,
612 typename enable_if<
613 _TCC<_Dummy>::template
614 _ConstructibleTuple<_Elements...>()
615 && !_TCC<_Dummy>::template
616 _ImplicitlyConvertibleTuple<_Elements...>()
617 && (sizeof...(_Elements) >= 1),
618 bool>::type=false>
619 explicit constexpr tuple(const _Elements&... __elements)
620 : _Inherited(__elements...) { }
621
622 // Shortcut for the cases where constructors taking _UElements...
623 // need to be constrained.
624 template<typename... _UElements> using _TMC =
625 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
626 && (_TC<(sizeof...(_UElements)==1), _Elements...>::
627 template _NotSameTuple<_UElements...>()),
628 _Elements...>;
629
630 // Shortcut for the cases where constructors taking tuple<_UElements...>
631 // need to be constrained.
632 template<typename... _UElements> using _TMCT =
633 _TC<(sizeof...(_Elements) == sizeof...(_UElements))
634 && !is_same<tuple<_Elements...>,
635 tuple<_UElements...>>::value,
636 _Elements...>;
637
638 template<typename... _UElements, typename
639 enable_if<
640 _TMC<_UElements...>::template
641 _MoveConstructibleTuple<_UElements...>()
642 && _TMC<_UElements...>::template
643 _ImplicitlyMoveConvertibleTuple<_UElements...>()
644 && (sizeof...(_Elements) >= 1),
645 bool>::type=true>
646 constexpr tuple(_UElements&&... __elements)
647 : _Inherited(std::forward<_UElements>(__elements)...) { }
648
649 template<typename... _UElements, typename
650 enable_if<
651 _TMC<_UElements...>::template
652 _MoveConstructibleTuple<_UElements...>()
653 && !_TMC<_UElements...>::template
654 _ImplicitlyMoveConvertibleTuple<_UElements...>()
655 && (sizeof...(_Elements) >= 1),
656 bool>::type=false>
657 explicit constexpr tuple(_UElements&&... __elements)
658 : _Inherited(std::forward<_UElements>(__elements)...) { }
659
660 constexpr tuple(const tuple&) = default;
661
662 constexpr tuple(tuple&&) = default;
663
664 // Shortcut for the cases where constructors taking tuples
665 // must avoid creating temporaries.
666 template<typename _Dummy> using _TNTC =
667 _TC<is_same<_Dummy, void>::value && sizeof...(_Elements) == 1,
668 _Elements...>;
669
670 template<typename... _UElements, typename _Dummy = void, typename
671 enable_if<_TMCT<_UElements...>::template
672 _ConstructibleTuple<_UElements...>()
673 && _TMCT<_UElements...>::template
674 _ImplicitlyConvertibleTuple<_UElements...>()
675 && _TNTC<_Dummy>::template
676 _NonNestedTuple<const tuple<_UElements...>&>(),
677 bool>::type=true>
678 constexpr tuple(const tuple<_UElements...>& __in)
679 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
680 { }
681
682 template<typename... _UElements, typename _Dummy = void, typename
683 enable_if<_TMCT<_UElements...>::template
684 _ConstructibleTuple<_UElements...>()
685 && !_TMCT<_UElements...>::template
686 _ImplicitlyConvertibleTuple<_UElements...>()
687 && _TNTC<_Dummy>::template
688 _NonNestedTuple<const tuple<_UElements...>&>(),
689 bool>::type=false>
690 explicit constexpr tuple(const tuple<_UElements...>& __in)
691 : _Inherited(static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
692 { }
693
694 template<typename... _UElements, typename _Dummy = void, typename
695 enable_if<_TMCT<_UElements...>::template
696 _MoveConstructibleTuple<_UElements...>()
697 && _TMCT<_UElements...>::template
698 _ImplicitlyMoveConvertibleTuple<_UElements...>()
699 && _TNTC<_Dummy>::template
700 _NonNestedTuple<tuple<_UElements...>&&>(),
701 bool>::type=true>
702 constexpr tuple(tuple<_UElements...>&& __in)
703 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
704
705 template<typename... _UElements, typename _Dummy = void, typename
706 enable_if<_TMCT<_UElements...>::template
707 _MoveConstructibleTuple<_UElements...>()
708 && !_TMCT<_UElements...>::template
709 _ImplicitlyMoveConvertibleTuple<_UElements...>()
710 && _TNTC<_Dummy>::template
711 _NonNestedTuple<tuple<_UElements...>&&>(),
712 bool>::type=false>
713 explicit constexpr tuple(tuple<_UElements...>&& __in)
714 : _Inherited(static_cast<_Tuple_impl<0, _UElements...>&&>(__in)) { }
715
716 // Allocator-extended constructors.
717
718 template<typename _Alloc>
719 tuple(allocator_arg_t __tag, const _Alloc& __a)
720 : _Inherited(__tag, __a) { }
721
722 template<typename _Alloc, typename _Dummy = void,
723 typename enable_if<
724 _TCC<_Dummy>::template
725 _ConstructibleTuple<_Elements...>()
726 && _TCC<_Dummy>::template
727 _ImplicitlyConvertibleTuple<_Elements...>(),
728 bool>::type=true>
729 tuple(allocator_arg_t __tag, const _Alloc& __a,
730 const _Elements&... __elements)
731 : _Inherited(__tag, __a, __elements...) { }
732
733 template<typename _Alloc, typename _Dummy = void,
734 typename enable_if<
735 _TCC<_Dummy>::template
736 _ConstructibleTuple<_Elements...>()
737 && !_TCC<_Dummy>::template
738 _ImplicitlyConvertibleTuple<_Elements...>(),
739 bool>::type=false>
740 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
741 const _Elements&... __elements)
742 : _Inherited(__tag, __a, __elements...) { }
743
744 template<typename _Alloc, typename... _UElements, typename
745 enable_if<_TMC<_UElements...>::template
746 _MoveConstructibleTuple<_UElements...>()
747 && _TMC<_UElements...>::template
748 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
749 bool>::type=true>
750 tuple(allocator_arg_t __tag, const _Alloc& __a,
751 _UElements&&... __elements)
752 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
753 { }
754
755 template<typename _Alloc, typename... _UElements, typename
756 enable_if<_TMC<_UElements...>::template
757 _MoveConstructibleTuple<_UElements...>()
758 && !_TMC<_UElements...>::template
759 _ImplicitlyMoveConvertibleTuple<_UElements...>(),
760 bool>::type=false>
761 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
762 _UElements&&... __elements)
763 : _Inherited(__tag, __a, std::forward<_UElements>(__elements)...)
764 { }
765
766 template<typename _Alloc>
767 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
768 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
769
770 template<typename _Alloc>
771 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
772 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
773
774 template<typename _Alloc, typename _Dummy = void,
775 typename... _UElements, typename
776 enable_if<_TMCT<_UElements...>::template
777 _ConstructibleTuple<_UElements...>()
778 && _TMCT<_UElements...>::template
779 _ImplicitlyConvertibleTuple<_UElements...>()
780 && _TNTC<_Dummy>::template
781 _NonNestedTuple<tuple<_UElements...>&&>(),
782 bool>::type=true>
783 tuple(allocator_arg_t __tag, const _Alloc& __a,
784 const tuple<_UElements...>& __in)
785 : _Inherited(__tag, __a,
786 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
787 { }
788
789 template<typename _Alloc, typename _Dummy = void,
790 typename... _UElements, typename
791 enable_if<_TMCT<_UElements...>::template
792 _ConstructibleTuple<_UElements...>()
793 && !_TMCT<_UElements...>::template
794 _ImplicitlyConvertibleTuple<_UElements...>()
795 && _TNTC<_Dummy>::template
796 _NonNestedTuple<tuple<_UElements...>&&>(),
797 bool>::type=false>
798 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
799 const tuple<_UElements...>& __in)
800 : _Inherited(__tag, __a,
801 static_cast<const _Tuple_impl<0, _UElements...>&>(__in))
802 { }
803
804 template<typename _Alloc, typename _Dummy = void,
805 typename... _UElements, typename
806 enable_if<_TMCT<_UElements...>::template
807 _MoveConstructibleTuple<_UElements...>()
808 && _TMCT<_UElements...>::template
809 _ImplicitlyMoveConvertibleTuple<_UElements...>()
810 && _TNTC<_Dummy>::template
811 _NonNestedTuple<tuple<_UElements...>&&>(),
812 bool>::type=true>
813 tuple(allocator_arg_t __tag, const _Alloc& __a,
814 tuple<_UElements...>&& __in)
815 : _Inherited(__tag, __a,
816 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
817 { }
818
819 template<typename _Alloc, typename _Dummy = void,
820 typename... _UElements, typename
821 enable_if<_TMCT<_UElements...>::template
822 _MoveConstructibleTuple<_UElements...>()
823 && !_TMCT<_UElements...>::template
824 _ImplicitlyMoveConvertibleTuple<_UElements...>()
825 && _TNTC<_Dummy>::template
826 _NonNestedTuple<tuple<_UElements...>&&>(),
827 bool>::type=false>
828 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
829 tuple<_UElements...>&& __in)
830 : _Inherited(__tag, __a,
831 static_cast<_Tuple_impl<0, _UElements...>&&>(__in))
832 { }
833
834 tuple&
835 operator=(const tuple& __in)
836 {
837 static_cast<_Inherited&>(*this) = __in;
838 return *this;
839 }
840
841 tuple&
842 operator=(tuple&& __in)
843 noexcept(is_nothrow_move_assignable<_Inherited>::value)
844 {
845 static_cast<_Inherited&>(*this) = std::move(__in);
846 return *this;
847 }
848
849 template<typename... _UElements>
850 typename
851 enable_if<sizeof...(_UElements)
852 == sizeof...(_Elements), tuple&>::type
853 operator=(const tuple<_UElements...>& __in)
854 {
855 static_cast<_Inherited&>(*this) = __in;
856 return *this;
857 }
858
859 template<typename... _UElements>
860 typename
861 enable_if<sizeof...(_UElements)
862 == sizeof...(_Elements), tuple&>::type
863 operator=(tuple<_UElements...>&& __in)
864 {
865 static_cast<_Inherited&>(*this) = std::move(__in);
866 return *this;
867 }
868
869 void
870 swap(tuple& __in)
871 noexcept(noexcept(__in._M_swap(__in)))
872 { _Inherited::_M_swap(__in); }
873 };
874
875#if __cpp_deduction_guides >= 201606
876 template<typename... _UTypes>
877 tuple(_UTypes...) -> tuple<_UTypes...>;
878 template<typename _T1, typename _T2>
879 tuple(pair<_T1, _T2>) -> tuple<_T1, _T2>;
880 template<typename _Alloc, typename... _UTypes>
881 tuple(allocator_arg_t, _Alloc, _UTypes...) -> tuple<_UTypes...>;
882 template<typename _Alloc, typename _T1, typename _T2>
883 tuple(allocator_arg_t, _Alloc, pair<_T1, _T2>) -> tuple<_T1, _T2>;
884 template<typename _Alloc, typename... _UTypes>
885 tuple(allocator_arg_t, _Alloc, tuple<_UTypes...>) -> tuple<_UTypes...>;
886#endif
887
888 // Explicit specialization, zero-element tuple.
889 template<>
890 class tuple<>
891 {
892 public:
893 void swap(tuple&) noexcept { /* no-op */ }
894 // We need the default since we're going to define no-op
895 // allocator constructors.
896 tuple() = default;
897 // No-op allocator constructors.
898 template<typename _Alloc>
899 tuple(allocator_arg_t, const _Alloc&) { }
900 template<typename _Alloc>
901 tuple(allocator_arg_t, const _Alloc&, const tuple&) { }
902 };
903
904 /// Partial specialization, 2-element tuple.
905 /// Includes construction and assignment from a pair.
906 template<typename _T1, typename _T2>
907 class tuple<_T1, _T2> : public _Tuple_impl<0, _T1, _T2>
908 {
909 typedef _Tuple_impl<0, _T1, _T2> _Inherited;
910
911 public:
912 template <typename _U1 = _T1,
913 typename _U2 = _T2,
914 typename enable_if<__and_<
915 __is_implicitly_default_constructible<_U1>,
916 __is_implicitly_default_constructible<_U2>>
917 ::value, bool>::type = true>
918
919 constexpr tuple()
920 : _Inherited() { }
921
922 template <typename _U1 = _T1,
923 typename _U2 = _T2,
924 typename enable_if<
925 __and_<
926 is_default_constructible<_U1>,
927 is_default_constructible<_U2>,
928 __not_<
929 __and_<__is_implicitly_default_constructible<_U1>,
930 __is_implicitly_default_constructible<_U2>>>>
931 ::value, bool>::type = false>
932
933 explicit constexpr tuple()
934 : _Inherited() { }
935
936 // Shortcut for the cases where constructors taking _T1, _T2
937 // need to be constrained.
938 template<typename _Dummy> using _TCC =
939 _TC<is_same<_Dummy, void>::value, _T1, _T2>;
940
941 template<typename _Dummy = void, typename
942 enable_if<_TCC<_Dummy>::template
943 _ConstructibleTuple<_T1, _T2>()
944 && _TCC<_Dummy>::template
945 _ImplicitlyConvertibleTuple<_T1, _T2>(),
946 bool>::type = true>
947 constexpr tuple(const _T1& __a1, const _T2& __a2)
948 : _Inherited(__a1, __a2) { }
949
950 template<typename _Dummy = void, typename
951 enable_if<_TCC<_Dummy>::template
952 _ConstructibleTuple<_T1, _T2>()
953 && !_TCC<_Dummy>::template
954 _ImplicitlyConvertibleTuple<_T1, _T2>(),
955 bool>::type = false>
956 explicit constexpr tuple(const _T1& __a1, const _T2& __a2)
957 : _Inherited(__a1, __a2) { }
958
959 // Shortcut for the cases where constructors taking _U1, _U2
960 // need to be constrained.
961 using _TMC = _TC<true, _T1, _T2>;
962
963 template<typename _U1, typename _U2, typename
964 enable_if<_TMC::template
965 _MoveConstructibleTuple<_U1, _U2>()
966 && _TMC::template
967 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
968 && !is_same<typename decay<_U1>::type,
969 allocator_arg_t>::value,
970 bool>::type = true>
971 constexpr tuple(_U1&& __a1, _U2&& __a2)
972 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
Step 7: Calling constructor for '_Tuple_impl<0, (lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
Step 19: Returning from constructor for '_Tuple_impl<0, (lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
Step 20: Returning without writing to 'this->_M_head_impl.StorageUnion.OutOfLineStorage.StoragePtr'
973
974 template<typename _U1, typename _U2, typename
975 enable_if<_TMC::template
976 _MoveConstructibleTuple<_U1, _U2>()
977 && !_TMC::template
978 _ImplicitlyMoveConvertibleTuple<_U1, _U2>()
979 && !is_same<typename decay<_U1>::type,
980 allocator_arg_t>::value,
981 bool>::type = false>
982 explicit constexpr tuple(_U1&& __a1, _U2&& __a2)
983 : _Inherited(std::forward<_U1>(__a1), std::forward<_U2>(__a2)) { }
984
985 constexpr tuple(const tuple&) = default;
986
987 constexpr tuple(tuple&&) = default;
988
989 template<typename _U1, typename _U2, typename
990 enable_if<_TMC::template
991 _ConstructibleTuple<_U1, _U2>()
992 && _TMC::template
993 _ImplicitlyConvertibleTuple<_U1, _U2>(),
994 bool>::type = true>
995 constexpr tuple(const tuple<_U1, _U2>& __in)
996 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
997
998 template<typename _U1, typename _U2, typename
999 enable_if<_TMC::template
1000 _ConstructibleTuple<_U1, _U2>()
1001 && !_TMC::template
1002 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1003 bool>::type = false>
1004 explicit constexpr tuple(const tuple<_U1, _U2>& __in)
1005 : _Inherited(static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in)) { }
1006
1007 template<typename _U1, typename _U2, typename
1008 enable_if<_TMC::template
1009 _MoveConstructibleTuple<_U1, _U2>()
1010 && _TMC::template
1011 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1012 bool>::type = true>
1013 constexpr tuple(tuple<_U1, _U2>&& __in)
1014 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1015
1016 template<typename _U1, typename _U2, typename
1017 enable_if<_TMC::template
1018 _MoveConstructibleTuple<_U1, _U2>()
1019 && !_TMC::template
1020 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1021 bool>::type = false>
1022 explicit constexpr tuple(tuple<_U1, _U2>&& __in)
1023 : _Inherited(static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in)) { }
1024
1025 template<typename _U1, typename _U2, typename
1026 enable_if<_TMC::template
1027 _ConstructibleTuple<_U1, _U2>()
1028 && _TMC::template
1029 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1030 bool>::type = true>
1031 constexpr tuple(const pair<_U1, _U2>& __in)
1032 : _Inherited(__in.first, __in.second) { }
1033
1034 template<typename _U1, typename _U2, typename
1035 enable_if<_TMC::template
1036 _ConstructibleTuple<_U1, _U2>()
1037 && !_TMC::template
1038 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1039 bool>::type = false>
1040 explicit constexpr tuple(const pair<_U1, _U2>& __in)
1041 : _Inherited(__in.first, __in.second) { }
1042
1043 template<typename _U1, typename _U2, typename
1044 enable_if<_TMC::template
1045 _MoveConstructibleTuple<_U1, _U2>()
1046 && _TMC::template
1047 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1048 bool>::type = true>
1049 constexpr tuple(pair<_U1, _U2>&& __in)
1050 : _Inherited(std::forward<_U1>(__in.first),
1051 std::forward<_U2>(__in.second)) { }
1052
1053 template<typename _U1, typename _U2, typename
1054 enable_if<_TMC::template
1055 _MoveConstructibleTuple<_U1, _U2>()
1056 && !_TMC::template
1057 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1058 bool>::type = false>
1059 explicit constexpr tuple(pair<_U1, _U2>&& __in)
1060 : _Inherited(std::forward<_U1>(__in.first),
1061 std::forward<_U2>(__in.second)) { }
1062
1063 // Allocator-extended constructors.
1064
1065 template<typename _Alloc>
1066 tuple(allocator_arg_t __tag, const _Alloc& __a)
1067 : _Inherited(__tag, __a) { }
1068
1069 template<typename _Alloc, typename _Dummy = void,
1070 typename enable_if<
1071 _TCC<_Dummy>::template
1072 _ConstructibleTuple<_T1, _T2>()
1073 && _TCC<_Dummy>::template
1074 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1075 bool>::type=true>
1076
1077 tuple(allocator_arg_t __tag, const _Alloc& __a,
1078 const _T1& __a1, const _T2& __a2)
1079 : _Inherited(__tag, __a, __a1, __a2) { }
1080
1081 template<typename _Alloc, typename _Dummy = void,
1082 typename enable_if<
1083 _TCC<_Dummy>::template
1084 _ConstructibleTuple<_T1, _T2>()
1085 && !_TCC<_Dummy>::template
1086 _ImplicitlyConvertibleTuple<_T1, _T2>(),
1087 bool>::type=false>
1088
1089 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1090 const _T1& __a1, const _T2& __a2)
1091 : _Inherited(__tag, __a, __a1, __a2) { }
1092
1093 template<typename _Alloc, typename _U1, typename _U2, typename
1094 enable_if<_TMC::template
1095 _MoveConstructibleTuple<_U1, _U2>()
1096 && _TMC::template
1097 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1098 bool>::type = true>
1099 tuple(allocator_arg_t __tag, const _Alloc& __a, _U1&& __a1, _U2&& __a2)
1100 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1101 std::forward<_U2>(__a2)) { }
1102
1103 template<typename _Alloc, typename _U1, typename _U2, typename
1104 enable_if<_TMC::template
1105 _MoveConstructibleTuple<_U1, _U2>()
1106 && !_TMC::template
1107 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1108 bool>::type = false>
1109 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1110 _U1&& __a1, _U2&& __a2)
1111 : _Inherited(__tag, __a, std::forward<_U1>(__a1),
1112 std::forward<_U2>(__a2)) { }
1113
1114 template<typename _Alloc>
1115 tuple(allocator_arg_t __tag, const _Alloc& __a, const tuple& __in)
1116 : _Inherited(__tag, __a, static_cast<const _Inherited&>(__in)) { }
1117
1118 template<typename _Alloc>
1119 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple&& __in)
1120 : _Inherited(__tag, __a, static_cast<_Inherited&&>(__in)) { }
1121
1122 template<typename _Alloc, typename _U1, typename _U2, typename
1123 enable_if<_TMC::template
1124 _ConstructibleTuple<_U1, _U2>()
1125 && _TMC::template
1126 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1127 bool>::type = true>
1128 tuple(allocator_arg_t __tag, const _Alloc& __a,
1129 const tuple<_U1, _U2>& __in)
1130 : _Inherited(__tag, __a,
1131 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1132 { }
1133
1134 template<typename _Alloc, typename _U1, typename _U2, typename
1135 enable_if<_TMC::template
1136 _ConstructibleTuple<_U1, _U2>()
1137 && !_TMC::template
1138 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1139 bool>::type = false>
1140 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1141 const tuple<_U1, _U2>& __in)
1142 : _Inherited(__tag, __a,
1143 static_cast<const _Tuple_impl<0, _U1, _U2>&>(__in))
1144 { }
1145
1146 template<typename _Alloc, typename _U1, typename _U2, typename
1147 enable_if<_TMC::template
1148 _MoveConstructibleTuple<_U1, _U2>()
1149 && _TMC::template
1150 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1151 bool>::type = true>
1152 tuple(allocator_arg_t __tag, const _Alloc& __a, tuple<_U1, _U2>&& __in)
1153 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1154 { }
1155
1156 template<typename _Alloc, typename _U1, typename _U2, typename
1157 enable_if<_TMC::template
1158 _MoveConstructibleTuple<_U1, _U2>()
1159 && !_TMC::template
1160 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1161 bool>::type = false>
1162 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1163 tuple<_U1, _U2>&& __in)
1164 : _Inherited(__tag, __a, static_cast<_Tuple_impl<0, _U1, _U2>&&>(__in))
1165 { }
1166
1167 template<typename _Alloc, typename _U1, typename _U2, typename
1168 enable_if<_TMC::template
1169 _ConstructibleTuple<_U1, _U2>()
1170 && _TMC::template
1171 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1172 bool>::type = true>
1173 tuple(allocator_arg_t __tag, const _Alloc& __a,
1174 const pair<_U1, _U2>& __in)
1175 : _Inherited(__tag, __a, __in.first, __in.second) { }
1176
1177 template<typename _Alloc, typename _U1, typename _U2, typename
1178 enable_if<_TMC::template
1179 _ConstructibleTuple<_U1, _U2>()
1180 && !_TMC::template
1181 _ImplicitlyConvertibleTuple<_U1, _U2>(),
1182 bool>::type = false>
1183 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1184 const pair<_U1, _U2>& __in)
1185 : _Inherited(__tag, __a, __in.first, __in.second) { }
1186
1187 template<typename _Alloc, typename _U1, typename _U2, typename
1188 enable_if<_TMC::template
1189 _MoveConstructibleTuple<_U1, _U2>()
1190 && _TMC::template
1191 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1192 bool>::type = true>
1193 tuple(allocator_arg_t __tag, const _Alloc& __a, pair<_U1, _U2>&& __in)
1194 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1195 std::forward<_U2>(__in.second)) { }
1196
1197 template<typename _Alloc, typename _U1, typename _U2, typename
1198 enable_if<_TMC::template
1199 _MoveConstructibleTuple<_U1, _U2>()
1200 && !_TMC::template
1201 _ImplicitlyMoveConvertibleTuple<_U1, _U2>(),
1202 bool>::type = false>
1203 explicit tuple(allocator_arg_t __tag, const _Alloc& __a,
1204 pair<_U1, _U2>&& __in)
1205 : _Inherited(__tag, __a, std::forward<_U1>(__in.first),
1206 std::forward<_U2>(__in.second)) { }
1207
1208 tuple&
1209 operator=(const tuple& __in)
1210 {
1211 static_cast<_Inherited&>(*this) = __in;
1212 return *this;
1213 }
1214
1215 tuple&
1216 operator=(tuple&& __in)
1217 noexcept(is_nothrow_move_assignable<_Inherited>::value)
1218 {
1219 static_cast<_Inherited&>(*this) = std::move(__in);
1220 return *this;
1221 }
1222
1223 template<typename _U1, typename _U2>
1224 tuple&
1225 operator=(const tuple<_U1, _U2>& __in)
1226 {
1227 static_cast<_Inherited&>(*this) = __in;
1228 return *this;
1229 }
1230
1231 template<typename _U1, typename _U2>
1232 tuple&
1233 operator=(tuple<_U1, _U2>&& __in)
1234 {
1235 static_cast<_Inherited&>(*this) = std::move(__in);
1236 return *this;
1237 }
1238
1239 template<typename _U1, typename _U2>
1240 tuple&
1241 operator=(const pair<_U1, _U2>& __in)
1242 {
1243 this->_M_head(*this) = __in.first;
1244 this->_M_tail(*this)._M_head(*this) = __in.second;
1245 return *this;
1246 }
1247
1248 template<typename _U1, typename _U2>
1249 tuple&
1250 operator=(pair<_U1, _U2>&& __in)
1251 {
1252 this->_M_head(*this) = std::forward<_U1>(__in.first);
1253 this->_M_tail(*this)._M_head(*this) = std::forward<_U2>(__in.second);
1254 return *this;
1255 }
1256
1257 void
1258 swap(tuple& __in)
1259 noexcept(noexcept(__in._M_swap(__in)))
1260 { _Inherited::_M_swap(__in); }
1261 };
1262
1263
1264 /// class tuple_size
1265 template<typename... _Elements>
1266 struct tuple_size<tuple<_Elements...>>
1267 : public integral_constant<std::size_t, sizeof...(_Elements)> { };
1268
1269#if __cplusplus > 201402L
1270 template <typename _Tp>
1271 inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
1272#endif
1273
1274 /**
1275 * Recursive case for tuple_element: strip off the first element in
1276 * the tuple and retrieve the (i-1)th element of the remaining tuple.
1277 */
1278 template<std::size_t __i, typename _Head, typename... _Tail>
1279 struct tuple_element<__i, tuple<_Head, _Tail...> >
1280 : tuple_element<__i - 1, tuple<_Tail...> > { };
1281
1282 /**
1283 * Basis case for tuple_element: The first element is the one we're seeking.
1284 */
1285 template<typename _Head, typename... _Tail>
1286 struct tuple_element<0, tuple<_Head, _Tail...> >
1287 {
1288 typedef _Head type;
1289 };
1290
1291 /**
1292 * Error case for tuple_element: invalid index.
1293 */
1294 template<size_t __i>
1295 struct tuple_element<__i, tuple<>>
1296 {
1297 static_assert(__i < tuple_size<tuple<>>::value,
1298 "tuple index is in range");
1299 };
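
The recursive and basis cases above resolve an index by stripping one element per instantiation. A quick illustration (not part of the header):

    // Index 1 into tuple<int, char>: strip int, then index 0 selects char.
    static_assert(
        std::is_same<std::tuple_element<1, std::tuple<int, char>>::type,
                     char>::value,
        "tuple_element peels one element per recursion step");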
1300
1301 template<std::size_t __i, typename _Head, typename... _Tail>
1302 constexpr _Head&
1303 __get_helper(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1304 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1305
1306 template<std::size_t __i, typename _Head, typename... _Tail>
1307 constexpr const _Head&
1308 __get_helper(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1309 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1310
1311 /// Return a reference to the ith element of a tuple.
1312 template<std::size_t __i, typename... _Elements>
1313 constexpr __tuple_element_t<__i, tuple<_Elements...>>&
1314 get(tuple<_Elements...>& __t) noexcept
1315 { return std::__get_helper<__i>(__t); }
1316
1317 /// Return a const reference to the ith element of a const tuple.
1318 template<std::size_t __i, typename... _Elements>
1319 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&
1320 get(const tuple<_Elements...>& __t) noexcept
1321 { return std::__get_helper<__i>(__t); }
1322
1323 /// Return an rvalue reference to the ith element of a tuple rvalue.
1324 template<std::size_t __i, typename... _Elements>
1325 constexpr __tuple_element_t<__i, tuple<_Elements...>>&&
1326 get(tuple<_Elements...>&& __t) noexcept
1327 {
1328 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1329 return std::forward<__element_type&&>(std::get<__i>(__t));
1330 }
1331
1332 /// Return a const rvalue reference to the ith element of a const tuple rvalue.
1333 template<std::size_t __i, typename... _Elements>
1334 constexpr const __tuple_element_t<__i, tuple<_Elements...>>&&
1335 get(const tuple<_Elements...>&& __t) noexcept
1336 {
1337 typedef __tuple_element_t<__i, tuple<_Elements...>> __element_type;
1338 return std::forward<const __element_type&&>(std::get<__i>(__t));
1339 }
1340
1341#if __cplusplus > 201103L
1342
1343#define __cpp_lib_tuples_by_type 201304
1344
1345 template<typename _Head, size_t __i, typename... _Tail>
1346 constexpr _Head&
1347 __get_helper2(_Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1348 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1349
1350 template<typename _Head, size_t __i, typename... _Tail>
1351 constexpr const _Head&
1352 __get_helper2(const _Tuple_impl<__i, _Head, _Tail...>& __t) noexcept
1353 { return _Tuple_impl<__i, _Head, _Tail...>::_M_head(__t); }
1354
1355 /// Return a reference to the unique element of type _Tp of a tuple.
1356 template <typename _Tp, typename... _Types>
1357 constexpr _Tp&
1358 get(tuple<_Types...>& __t) noexcept
1359 { return std::__get_helper2<_Tp>(__t); }
1360
1361 /// Return a reference to the unique element of type _Tp of a tuple rvalue.
1362 template <typename _Tp, typename... _Types>
1363 constexpr _Tp&&
1364 get(tuple<_Types...>&& __t) noexcept
1365 { return std::forward<_Tp&&>(std::__get_helper2<_Tp>(__t)); }
1366
1367 /// Return a const reference to the unique element of type _Tp of a tuple.
1368 template <typename _Tp, typename... _Types>
1369 constexpr const _Tp&
1370 get(const tuple<_Types...>& __t) noexcept
1371 { return std::__get_helper2<_Tp>(__t); }
1372
1373 /// Return a const reference to the unique element of type _Tp of
1374 /// a const tuple rvalue.
1375 template <typename _Tp, typename... _Types>
1376 constexpr const _Tp&&
1377 get(const tuple<_Types...>&& __t) noexcept
1378 { return std::forward<const _Tp&&>(std::__get_helper2<_Tp>(__t)); }
1379#endif
1380
1381 // This class performs the comparison operations on tuples
1382 template<typename _Tp, typename _Up, size_t __i, size_t __size>
1383 struct __tuple_compare
1384 {
1385 static constexpr bool
1386 __eq(const _Tp& __t, const _Up& __u)
1387 {
1388 return bool(std::get<__i>(__t) == std::get<__i>(__u))
1389 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__eq(__t, __u);
1390 }
1391
1392 static constexpr bool
1393 __less(const _Tp& __t, const _Up& __u)
1394 {
1395 return bool(std::get<__i>(__t) < std::get<__i>(__u))
1396 || (!bool(std::get<__i>(__u) < std::get<__i>(__t))
1397 && __tuple_compare<_Tp, _Up, __i + 1, __size>::__less(__t, __u));
1398 }
1399 };
1400
1401 template<typename _Tp, typename _Up, size_t __size>
1402 struct __tuple_compare<_Tp, _Up, __size, __size>
1403 {
1404 static constexpr bool
1405 __eq(const _Tp&, const _Up&) { return true; }
1406
1407 static constexpr bool
1408 __less(const _Tp&, const _Up&) { return false; }
1409 };
1410
1411 template<typename... _TElements, typename... _UElements>
1412 constexpr bool
1413 operator==(const tuple<_TElements...>& __t,
1414 const tuple<_UElements...>& __u)
1415 {
1416 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1417 "tuple objects can only be compared if they have equal sizes.");
1418 using __compare = __tuple_compare<tuple<_TElements...>,
1419 tuple<_UElements...>,
1420 0, sizeof...(_TElements)>;
1421 return __compare::__eq(__t, __u);
1422 }
1423
1424 template<typename... _TElements, typename... _UElements>
1425 constexpr bool
1426 operator<(const tuple<_TElements...>& __t,
1427 const tuple<_UElements...>& __u)
1428 {
1429 static_assert(sizeof...(_TElements) == sizeof...(_UElements),
1430 "tuple objects can only be compared if they have equal sizes.");
1431 using __compare = __tuple_compare<tuple<_TElements...>,
1432 tuple<_UElements...>,
1433 0, sizeof...(_TElements)>;
1434 return __compare::__less(__t, __u);
1435 }
1436
1437 template<typename... _TElements, typename... _UElements>
1438 constexpr bool
1439 operator!=(const tuple<_TElements...>& __t,
1440 const tuple<_UElements...>& __u)
1441 { return !(__t == __u); }
1442
1443 template<typename... _TElements, typename... _UElements>
1444 constexpr bool
1445 operator>(const tuple<_TElements...>& __t,
1446 const tuple<_UElements...>& __u)
1447 { return __u < __t; }
1448
1449 template<typename... _TElements, typename... _UElements>
1450 constexpr bool
1451 operator<=(const tuple<_TElements...>& __t,
1452 const tuple<_UElements...>& __u)
1453 { return !(__u < __t); }
1454
1455 template<typename... _TElements, typename... _UElements>
1456 constexpr bool
1457 operator>=(const tuple<_TElements...>& __t,
1458 const tuple<_UElements...>& __u)
1459 { return !(__t < __u); }
1460
1461 // NB: DR 705.
1462 template<typename... _Elements>
1463 constexpr tuple<typename __decay_and_strip<_Elements>::__type...>
1464 make_tuple(_Elements&&... __args)
1465 {
1466 typedef tuple<typename __decay_and_strip<_Elements>::__type...>
1467 __result_type;
1468 return __result_type(std::forward<_Elements>(__args)...);
Step 6: Calling constructor for 'tuple<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
Step 21: Returning from constructor for 'tuple<(lambda at /build/llvm-toolchain-snapshot-7~svn338205/tools/clang/tools/extra/clangd/TUScheduler.cpp:402:15), llvm::unique_function<void (llvm::Expected<clang::clangd::InputsAndAST>)>>'
1469 }
1470
1471 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1472 // 2275. Why is forward_as_tuple not constexpr?
1473 template<typename... _Elements>
1474 constexpr tuple<_Elements&&...>
1475 forward_as_tuple(_Elements&&... __args) noexcept
1476 { return tuple<_Elements&&...>(std::forward<_Elements>(__args)...); }
1477
1478 template<size_t, typename, typename, size_t>
1479 struct __make_tuple_impl;
1480
1481 template<size_t _Idx, typename _Tuple, typename... _Tp, size_t _Nm>
1482 struct __make_tuple_impl<_Idx, tuple<_Tp...>, _Tuple, _Nm>
1483 : __make_tuple_impl<_Idx + 1,
1484 tuple<_Tp..., __tuple_element_t<_Idx, _Tuple>>,
1485 _Tuple, _Nm>
1486 { };
1487
1488 template<std::size_t _Nm, typename _Tuple, typename... _Tp>
1489 struct __make_tuple_impl<_Nm, tuple<_Tp...>, _Tuple, _Nm>
1490 {
1491 typedef tuple<_Tp...> __type;
1492 };
1493
1494 template<typename _Tuple>
1495 struct __do_make_tuple
1496 : __make_tuple_impl<0, tuple<>, _Tuple, std::tuple_size<_Tuple>::value>
1497 { };
1498
1499 // Returns the std::tuple equivalent of a tuple-like type.
1500 template<typename _Tuple>
1501 struct __make_tuple
1502 : public __do_make_tuple<typename std::remove_cv
1503 <typename std::remove_reference<_Tuple>::type>::type>
1504 { };
1505
1506 // Combines several std::tuple's into a single one.
1507 template<typename...>
1508 struct __combine_tuples;
1509
1510 template<>
1511 struct __combine_tuples<>
1512 {
1513 typedef tuple<> __type;
1514 };
1515
1516 template<typename... _Ts>
1517 struct __combine_tuples<tuple<_Ts...>>
1518 {
1519 typedef tuple<_Ts...> __type;
1520 };
1521
1522 template<typename... _T1s, typename... _T2s, typename... _Rem>
1523 struct __combine_tuples<tuple<_T1s...>, tuple<_T2s...>, _Rem...>
1524 {
1525 typedef typename __combine_tuples<tuple<_T1s..., _T2s...>,
1526 _Rem...>::__type __type;
1527 };
1528
1529 // Computes the result type of tuple_cat given a set of tuple-like types.
1530 template<typename... _Tpls>
1531 struct __tuple_cat_result
1532 {
1533 typedef typename __combine_tuples
1534 <typename __make_tuple<_Tpls>::__type...>::__type __type;
1535 };
1536
1537 // Helper to determine the index set for the first tuple-like
1538 // type of a given set.
1539 template<typename...>
1540 struct __make_1st_indices;
1541
1542 template<>
1543 struct __make_1st_indices<>
1544 {
1545 typedef std::_Index_tuple<> __type;
1546 };
1547
1548 template<typename _Tp, typename... _Tpls>
1549 struct __make_1st_indices<_Tp, _Tpls...>
1550 {
1551 typedef typename std::_Build_index_tuple<std::tuple_size<
1552 typename std::remove_reference<_Tp>::type>::value>::__type __type;
1553 };
1554
1555 // Performs the actual concatenation by step-wise expanding tuple-like
1556 // objects into the elements, which are finally forwarded into the
1557 // result tuple.
1558 template<typename _Ret, typename _Indices, typename... _Tpls>
1559 struct __tuple_concater;
1560
1561 template<typename _Ret, std::size_t... _Is, typename _Tp, typename... _Tpls>
1562 struct __tuple_concater<_Ret, std::_Index_tuple<_Is...>, _Tp, _Tpls...>
1563 {
1564 template<typename... _Us>
1565 static constexpr _Ret
1566 _S_do(_Tp&& __tp, _Tpls&&... __tps, _Us&&... __us)
1567 {
1568 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1569 typedef __tuple_concater<_Ret, __idx, _Tpls...> __next;
1570 return __next::_S_do(std::forward<_Tpls>(__tps)...,
1571 std::forward<_Us>(__us)...,
1572 std::get<_Is>(std::forward<_Tp>(__tp))...);
1573 }
1574 };
1575
1576 template<typename _Ret>
1577 struct __tuple_concater<_Ret, std::_Index_tuple<>>
1578 {
1579 template<typename... _Us>
1580 static constexpr _Ret
1581 _S_do(_Us&&... __us)
1582 {
1583 return _Ret(std::forward<_Us>(__us)...);
1584 }
1585 };
1586
1587 /// tuple_cat
1588 template<typename... _Tpls, typename = typename
1589 enable_if<__and_<__is_tuple_like<_Tpls>...>::value>::type>
1590 constexpr auto
1591 tuple_cat(_Tpls&&... __tpls)
1592 -> typename __tuple_cat_result<_Tpls...>::__type
1593 {
1594 typedef typename __tuple_cat_result<_Tpls...>::__type __ret;
1595 typedef typename __make_1st_indices<_Tpls...>::__type __idx;
1596 typedef __tuple_concater<__ret, __idx, _Tpls...> __concater;
1597 return __concater::_S_do(std::forward<_Tpls>(__tpls)...);
1598 }
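
An illustrative use of tuple_cat (not part of the header): the concatenation machinery above flattens every tuple-like argument into a single result tuple.

    #include <string>
    #include <tuple>

    std::tuple<int, char, std::string> example() {
      auto a = std::make_tuple(1, 'x');
      auto b = std::make_tuple(std::string("y"));
      return std::tuple_cat(a, b); // tuple<int, char, std::string>
    }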
1599
1600 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1601 // 2301. Why is tie not constexpr?
1602 /// tie
1603 template<typename... _Elements>
1604 constexpr tuple<_Elements&...>
1605 tie(_Elements&... __args) noexcept
1606 { return tuple<_Elements&...>(__args...); }
1607
1608 /// swap
1609 template<typename... _Elements>
1610 inline
1611#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1612 // Constrained free swap overload, see p0185r1
1613 typename enable_if<__and_<__is_swappable<_Elements>...>::value
1614 >::type
1615#else
1616 void
1617#endif
1618 swap(tuple<_Elements...>& __x, tuple<_Elements...>& __y)
1619 noexcept(noexcept(__x.swap(__y)))
1620 { __x.swap(__y); }
1621
1622#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1623 template<typename... _Elements>
1624 typename enable_if<!__and_<__is_swappable<_Elements>...>::value>::type
1625 swap(tuple<_Elements...>&, tuple<_Elements...>&) = delete;
1626#endif
1627
1628 // A class (and instance) which can be used in 'tie' when an element
1629 // of a tuple is not required.
1630 // _GLIBCXX14_CONSTEXPR
1631 // 2933. PR for LWG 2773 could be clearer
1632 struct _Swallow_assign
1633 {
1634 template<class _Tp>
1635 _GLIBCXX14_CONSTEXPR const _Swallow_assign&
1636 operator=(const _Tp&) const
1637 { return *this; }
1638 };
1639
1640 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1641 // 2773. Making std::ignore constexpr
1642 _GLIBCXX17_INLINE constexpr _Swallow_assign ignore{};
1643
1644 /// Partial specialization for tuples
1645 template<typename... _Types, typename _Alloc>
1646 struct uses_allocator<tuple<_Types...>, _Alloc> : true_type { };
1647
1648 // See stl_pair.h...
1649 template<class _T1, class _T2>
1650 template<typename... _Args1, typename... _Args2>
1651 inline
1652 pair<_T1, _T2>::
1653 pair(piecewise_construct_t,
1654 tuple<_Args1...> __first, tuple<_Args2...> __second)
1655 : pair(__first, __second,
1656 typename _Build_index_tuple<sizeof...(_Args1)>::__type(),
1657 typename _Build_index_tuple<sizeof...(_Args2)>::__type())
1658 { }
1659
1660 template<class _T1, class _T2>
1661 template<typename... _Args1, std::size_t... _Indexes1,
1662 typename... _Args2, std::size_t... _Indexes2>
1663 inline
1664 pair<_T1, _T2>::
1665 pair(tuple<_Args1...>& __tuple1, tuple<_Args2...>& __tuple2,
1666 _Index_tuple<_Indexes1...>, _Index_tuple<_Indexes2...>)
1667 : first(std::forward<_Args1>(std::get<_Indexes1>(__tuple1))...),
1668 second(std::forward<_Args2>(std::get<_Indexes2>(__tuple2))...)
1669 { }
1670
1671#if __cplusplus > 201402L
1672# define __cpp_lib_apply 201603
1673
1674 template <typename _Fn, typename _Tuple, size_t... _Idx>
1675 constexpr decltype(auto)
1676 __apply_impl(_Fn&& __f, _Tuple&& __t, index_sequence<_Idx...>)
1677 {
1678 return std::__invoke(std::forward<_Fn>(__f),
1679 std::get<_Idx>(std::forward<_Tuple>(__t))...);
1680 }
1681
1682 template <typename _Fn, typename _Tuple>
1683 constexpr decltype(auto)
1684 apply(_Fn&& __f, _Tuple&& __t)
1685 {
1686 using _Indices = make_index_sequence<tuple_size_v<decay_t<_Tuple>>>;
1687 return std::__apply_impl(std::forward<_Fn>(__f),
1688 std::forward<_Tuple>(__t),
1689 _Indices{});
1690 }
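
An illustrative use of std::apply (C++17, not part of the header): the index-sequence expansion above unpacks the tuple elements into the call.

    #include <tuple>

    int sum3(int a, int b, int c) { return a + b + c; }

    int example() {
      return std::apply(sum3, std::make_tuple(1, 2, 3)); // returns 6
    }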
1691
1692#define __cpp_lib_make_from_tuple 201606
1693
1694 template <typename _Tp, typename _Tuple, size_t... _Idx>
1695 constexpr _Tp
1696 __make_from_tuple_impl(_Tuple&& __t, index_sequence<_Idx...>)
1697 { return _Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...); }
1698
1699 template <typename _Tp, typename _Tuple>
1700 constexpr _Tp
1701 make_from_tuple(_Tuple&& __t)
1702 {
1703 return __make_from_tuple_impl<_Tp>(
1704 std::forward<_Tuple>(__t),
1705 make_index_sequence<tuple_size_v<decay_t<_Tuple>>>{});
1706 }
1707#endif // C++17
1708
1709 /// @}
1710
1711_GLIBCXX_END_NAMESPACE_VERSION
1712} // namespace std
1713
1714#endif // C++11
1715
1716#endif // _GLIBCXX_TUPLE

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/ADT/FunctionExtras.h

1//===- FunctionExtras.h - Function type erasure utilities -------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10/// This file provides a collection of function (or more generally, callable)
11/// type erasure utilities supplementing those provided by the standard library
12/// in `<functional>`.
13///
14/// It provides `unique_function`, which works like `std::function` but supports
15/// move-only callable objects.
16///
17/// Future plans:
18/// - Add a `function` that provides const, volatile, and ref-qualified support,
19/// which doesn't work with `std::function`.
20/// - Provide support for specifying multiple signatures to type erase callable
21/// objects with an overload set, such as those produced by generic lambdas.
22/// - Expand to include a copyable utility that directly replaces std::function
23/// but brings the above improvements.
24///
25/// Note that LLVM's utilities are greatly simplified by not supporting
26/// allocators.
27///
28/// If the standard library ever begins to provide comparable facilities we can
29/// consider switching to those.
30///
31//===----------------------------------------------------------------------===//
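
A minimal usage sketch (not from the header; assumes C++14 for make_unique and init-capture): unlike std::function, unique_function can own a move-only callable, such as a lambda capturing a std::unique_ptr.

    #include "llvm/ADT/FunctionExtras.h"
    #include <memory>

    int example() {
      auto P = std::make_unique<int>(42);
      llvm::unique_function<int()> F =
          [P = std::move(P)] { return *P; }; // move-only capture
      return F(); // returns 42
    }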
32
33#ifndef LLVM_ADT_FUNCTION_EXTRAS_H
34#define LLVM_ADT_FUNCTION_EXTRAS_H
35
36#include "llvm/ADT/PointerIntPair.h"
37#include "llvm/ADT/PointerUnion.h"
38#include "llvm/Support/type_traits.h"
39#include <memory>
40
41namespace llvm {
42
43template <typename FunctionT> class unique_function;
44
45template <typename ReturnT, typename... ParamTs>
46class unique_function<ReturnT(ParamTs...)> {
47 static constexpr size_t InlineStorageSize = sizeof(void *) * 3;
48
49 // MSVC has a bug and ICEs if we give it a particular dependent value
50 // expression as part of the `std::conditional` below. To work around this,
51 // we build that into a template struct's constexpr bool.
52 template <typename T> struct IsSizeLessThanThresholdT {
53 static constexpr bool value = sizeof(T) <= (2 * sizeof(void *));
54 };
55
56  // Provide a type function that maps each parameter to the type used in our
57  // erased call utility: parameters that won't observe extra copies or moves,
58  // and that are small enough to likely pass in a register, map to by-value;
59  // all other types map to l-value references. This minimizes copies and moves
60  // unless doing so would force things unnecessarily into memory.
61 //
62 // The heuristic used is related to common ABI register passing conventions.
63 // It doesn't have to be exact though, and in one way it is more strict
64 // because we want to still be able to observe either moves *or* copies.
65 template <typename T>
66 using AdjustedParamT = typename std::conditional<
67 !std::is_reference<T>::value &&
68 llvm::is_trivially_copy_constructible<T>::value &&
69 llvm::is_trivially_move_constructible<T>::value &&
70 IsSizeLessThanThresholdT<T>::value,
71 T, T &>::type;
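  // Illustration of the mapping (assumed behavior, shown as comments because
  // AdjustedParamT is a private member):
  //   AdjustedParamT<int>         => int            (small, trivially copyable)
  //   AdjustedParamT<std::string> => std::string &  (not trivially copyable)
  //   AdjustedParamT<int &>       => int &          (already a reference)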
72
73 // The type of the erased function pointer we use as a callback to dispatch to
74 // the stored callable when it is trivial to move and destroy.
75 using CallPtrT = ReturnT (*)(void *CallableAddr,
76 AdjustedParamT<ParamTs>... Params);
77 using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
78 using DestroyPtrT = void (*)(void *CallableAddr);
79
80 /// A struct to hold a single trivial callback with sufficient alignment for
81 /// our bitpacking.
82 struct alignas(8) TrivialCallback {
83 CallPtrT CallPtr;
84 };
85
86  /// A struct we use to aggregate three callbacks when we need the full set of
87 /// operations.
88 struct alignas(8) NonTrivialCallbacks {
89 CallPtrT CallPtr;
90 MovePtrT MovePtr;
91 DestroyPtrT DestroyPtr;
92 };
93
94  // Create a pointer union holding either a pointer to a struct wrapping the
95  // static trivial call pointer, or a pointer to a static struct of the call,
96  // move, and destroy pointers.
97 using CallbackPointerUnionT =
98 PointerUnion<TrivialCallback *, NonTrivialCallbacks *>;
99
100 // The main storage buffer. This will either have a pointer to out-of-line
101 // storage or an inline buffer storing the callable.
102 union StorageUnionT {
103 // For out-of-line storage we keep a pointer to the underlying storage and
104 // the size. This is enough to deallocate the memory.
105 struct OutOfLineStorageT {
106 void *StoragePtr;
107 size_t Size;
108 size_t Alignment;
109 } OutOfLineStorage;
110 static_assert(
111 sizeof(OutOfLineStorageT) <= InlineStorageSize,
112 "Should always use all of the out-of-line storage for inline storage!");
113
114 // For in-line storage, we just provide an aligned character buffer. We
115 // provide three pointers worth of storage here.
116 typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
117 InlineStorage;
118 } StorageUnion;
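  // For orientation (a sketch, assuming common targets): InlineStorageSize is
  // 3 * sizeof(void *) (24 bytes on LP64), and OutOfLineStorageT is one
  // pointer plus two size_t fields (also 24 bytes on LP64), so the
  // static_assert above holds with equality on LP64 and ILP32 alike.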
119
120 // A compressed pointer to either our dispatching callback or our table of
121 // dispatching callbacks and the flag for whether the callable itself is
122 // stored inline or not.
123 PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;
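  // Roughly how the packing behaves (illustrative): PointerIntPair stores the
  // bool in the low alignment bits of the pointer, e.g.
  //   CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
  //   CallbackAndInlineFlag.getPointer(); // -> the callback struct pointer
  //   CallbackAndInlineFlag.getInt();     // -> the inline-storage flag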
124
125 bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }
126
127 bool isTrivialCallback() const {
128 return CallbackAndInlineFlag.getPointer().template is<TrivialCallback *>();
129 }
130
131 CallPtrT getTrivialCallback() const {
132 return CallbackAndInlineFlag.getPointer().template get<TrivialCallback *>()->CallPtr;
133 }
134
135 NonTrivialCallbacks *getNonTrivialCallbacks() const {
136 return CallbackAndInlineFlag.getPointer()
137 .template get<NonTrivialCallbacks *>();
138 }
139
140 void *getInlineStorage() { return &StorageUnion.InlineStorage; }
141
142 void *getOutOfLineStorage() {
143 return StorageUnion.OutOfLineStorage.StoragePtr;
Step 33: Undefined or garbage value returned to caller
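Note on this path: the move constructor later in this file returns early when moving from an empty unique_function (step 12), leaving StorageUnion unwritten; that early return is only reachable when CallbackAndInlineFlag is null, and the destructor's null check (step 28) should then prevent any read of StoragePtr. The warning therefore appears to be a false positive in which the analyzer fails to correlate the emptiness check in the move constructor with the null check in the destructor.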
144 }
145 size_t getOutOfLineStorageSize() const {
146 return StorageUnion.OutOfLineStorage.Size;
147 }
148 size_t getOutOfLineStorageAlignment() const {
149 return StorageUnion.OutOfLineStorage.Alignment;
150 }
151
152 void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
153 StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
154 }
155
156 template <typename CallableT>
157 static ReturnT CallImpl(void *CallableAddr, AdjustedParamT<ParamTs>... Params) {
158 return (*reinterpret_cast<CallableT *>(CallableAddr))(
159 std::forward<ParamTs>(Params)...);
160 }
161
162 template <typename CallableT>
163 static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
164 new (LHSCallableAddr)
165 CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
166 }
167
168 template <typename CallableT>
169 static void DestroyImpl(void *CallableAddr) noexcept {
170 reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
171 }
172
173public:
174 unique_function() = default;
175 unique_function(std::nullptr_t /*null_callable*/) {}
176
177 ~unique_function() {
178 if (!CallbackAndInlineFlag.getPointer())
Step 28: Taking false branch
179 return;
180
181 // Cache this value so we don't re-check it after type-erased operations.
182 bool IsInlineStorage = isInlineStorage();
183
184 if (!isTrivialCallback())
Step 29: Taking false branch
185 getNonTrivialCallbacks()->DestroyPtr(
186 IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());
187
188 if (!IsInlineStorage)
Step 30: Assuming 'IsInlineStorage' is 0
Step 31: Taking true branch
189 deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
Step 32: Calling 'unique_function::getOutOfLineStorage'
190 getOutOfLineStorageAlignment());
191 }
192
193 unique_function(unique_function &&RHS) noexcept {
194 // Copy the callback and inline flag.
195 CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
196
197 // If the RHS is empty, just copying the above is sufficient.
198 if (!RHS)
Step 11: Taking true branch
199 return;
Step 12: Returning without writing to 'this->StorageUnion.OutOfLineStorage.StoragePtr'
200
201 if (!isInlineStorage()) {
202 // The out-of-line case is easiest to move.
203 StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
204 } else if (isTrivialCallback()) {
205 // Move is trivial, just memcpy the bytes across.
206 memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
207 } else {
208 // Non-trivial move, so dispatch to a type-erased implementation.
209 getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
210 RHS.getInlineStorage());
211 }
212
213 // Clear the old callback and inline flag to get back to as-if-null.
214 RHS.CallbackAndInlineFlag = {};
215
216#ifndef NDEBUG
217 // In debug builds, we also scribble across the rest of the storage.
218 memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
219#endif
220 }
221
222 unique_function &operator=(unique_function &&RHS) noexcept {
223 if (this == &RHS)
224 return *this;
225
226 // Because we don't try to provide any exception safety guarantees we can
227 // implement move assignment very simply by first destroying the current
228 // object and then move-constructing over top of it.
229 this->~unique_function();
230 new (this) unique_function(std::move(RHS));
231 return *this;
232 }
233
234 template <typename CallableT> unique_function(CallableT Callable) {
235 bool IsInlineStorage = true;
236 void *CallableAddr = getInlineStorage();
237 if (sizeof(CallableT) > InlineStorageSize ||
238 alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
239 IsInlineStorage = false;
240 // Allocate out-of-line storage. FIXME: Use an explicit alignment
241 // parameter in C++17 mode.
242 auto Size = sizeof(CallableT);
243 auto Alignment = alignof(CallableT);
244 CallableAddr = allocate_buffer(Size, Alignment);
245 setOutOfLineStorage(CallableAddr, Size, Alignment);
246 }
247
248 // Now move into the storage.
249 new (CallableAddr) CallableT(std::move(Callable));
250
251 // See if we can create a trivial callback. We need the callable to be
252 // trivially moved and trivially destroyed so that we don't have to store
253 // type erased callbacks for those operations.
254 //
255 // FIXME: We should use constexpr if here and below to avoid instantiating
256 // the non-trivial static objects when unnecessary. While the linker should
257 // remove them, it is still wasteful.
258 if (llvm::is_trivially_move_constructible<CallableT>::value &&
259 std::is_trivially_destructible<CallableT>::value) {
260 // We need to create a nicely aligned object. We use a static variable
261 // for this because it is a trivial struct.
262 static TrivialCallback Callback = { &CallImpl<CallableT> };
263
264 CallbackAndInlineFlag = {&Callback, IsInlineStorage};
265 return;
266 }
267
268 // Otherwise, we need to point at an object that contains all the different
269 // type erased behaviors needed. Create a static instance of the struct type
270 // here and then use a pointer to that.
271 static NonTrivialCallbacks Callbacks = {
272 &CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
273
274 CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
275 }
276
277 ReturnT operator()(ParamTs... Params) {
278 void *CallableAddr =
279 isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
280
281 return (isTrivialCallback()
282 ? getTrivialCallback()
283 : getNonTrivialCallbacks()->CallPtr)(CallableAddr, Params...);
284 }
285
286 explicit operator bool() const {
287 return (bool)CallbackAndInlineFlag.getPointer();
288 }
289};
290
291} // end namespace llvm
292
293#endif // LLVM_ADT_FUNCTION_EXTRAS_H