LLVM 22.0.0git
DenseMap.h
Go to the documentation of this file.
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
20#include "llvm/ADT/STLExtras.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : std::pair<KeyT, ValueT> {
  using BaseT = std::pair<KeyT, ValueT>;
  using BaseT::pair; // Inherit all of std::pair's constructors.

  // Named accessors used by the DenseMap machinery instead of first/second,
  // so custom bucket types only need to provide these four functions.
  KeyT &getFirst() { return this->first; }
  const KeyT &getFirst() const { return this->first; }
  ValueT &getSecond() { return this->second; }
  const ValueT &getSecond() const { return this->second; }
};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
70 using key_type = KeyT;
71 using mapped_type = ValueT;
72 using value_type = BucketT;
73
77
  // Iteration interface. The iterators visit only buckets holding a live
  // entry (makeBegin/makeEnd skip empty and tombstone buckets).
  [[nodiscard]] inline iterator begin() {
    return iterator::makeBegin(buckets(), empty(), *this);
  }
  [[nodiscard]] inline iterator end() {
    return iterator::makeEnd(buckets(), *this);
  }
  [[nodiscard]] inline const_iterator begin() const {
    return const_iterator::makeBegin(buckets(), empty(), *this);
  }
  [[nodiscard]] inline const_iterator end() const {
    return const_iterator::makeEnd(buckets(), *this);
  }

  // Return a range to iterate over the keys in the map.
  [[nodiscard]] inline auto keys() {
    return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
  }

  // Return a range to iterate over the values in the map.
  [[nodiscard]] inline auto values() {
    return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
  }

  [[nodiscard]] inline auto keys() const {
    return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
  }

  [[nodiscard]] inline auto values() const {
    return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
  }

  /// Return true when the map holds no live entries.
  [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
  /// Return the number of live entries (not the bucket count).
  [[nodiscard]] unsigned size() const { return getNumEntries(); }
111
112 /// Grow the densemap so that it can contain at least \p NumEntries items
113 /// before resizing again.
114 void reserve(size_type NumEntries) {
115 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
117 if (NumBuckets > getNumBuckets())
118 grow(NumBuckets);
119 }
120
121 void clear() {
123 if (getNumEntries() == 0 && getNumTombstones() == 0)
124 return;
125
126 // If the capacity of the array is huge, and the # elements used is small,
127 // shrink the array.
128 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
130 return;
131 }
132
133 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
134 if constexpr (std::is_trivially_destructible_v<ValueT>) {
135 // Use a simpler loop when values don't need destruction.
136 for (BucketT &B : buckets())
137 B.getFirst() = EmptyKey;
138 } else {
139 const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
140 unsigned NumEntries = getNumEntries();
141 for (BucketT &B : buckets()) {
142 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
143 if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
144 B.getSecond().~ValueT();
145 --NumEntries;
146 }
147 B.getFirst() = EmptyKey;
148 }
149 }
150 assert(NumEntries == 0 && "Node count imbalance!");
151 (void)NumEntries;
152 }
153 setNumEntries(0);
154 setNumTombstones(0);
155 }
156
158 auto [Reallocate, NewNumBuckets] = derived().planShrinkAndClear();
159 destroyAll();
160 if (!Reallocate) {
161 initEmpty();
162 return;
163 }
164 derived().deallocateBuckets();
165 derived().init(NewNumBuckets);
166 }
167
168 /// Return true if the specified key is in the map, false otherwise.
169 [[nodiscard]] bool contains(const_arg_type_t<KeyT> Val) const {
170 return doFind(Val) != nullptr;
171 }
172
173 /// Return 1 if the specified key is in the map, 0 otherwise.
174 [[nodiscard]] size_type count(const_arg_type_t<KeyT> Val) const {
175 return contains(Val) ? 1 : 0;
176 }
177
178 [[nodiscard]] iterator find(const_arg_type_t<KeyT> Val) {
179 return find_as(Val);
180 }
181 [[nodiscard]] const_iterator find(const_arg_type_t<KeyT> Val) const {
182 return find_as(Val);
183 }
184
185 /// Alternate version of find() which allows a different, and possibly
186 /// less expensive, key type.
187 /// The DenseMapInfo is responsible for supplying methods
188 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
189 /// type used.
190 template <class LookupKeyT>
191 [[nodiscard]] iterator find_as(const LookupKeyT &Val) {
192 if (BucketT *Bucket = doFind(Val))
193 return makeIterator(Bucket);
194 return end();
195 }
196 template <class LookupKeyT>
197 [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const {
198 if (const BucketT *Bucket = doFind(Val))
199 return makeConstIterator(Bucket);
200 return end();
201 }
202
203 /// lookup - Return the entry for the specified key, or a default
204 /// constructed value if no such entry exists.
205 [[nodiscard]] ValueT lookup(const_arg_type_t<KeyT> Val) const {
206 if (const BucketT *Bucket = doFind(Val))
207 return Bucket->getSecond();
208 return ValueT();
209 }
210
211 // Return the entry with the specified key, or \p Default. This variant is
212 // useful, because `lookup` cannot be used with non-default-constructible
213 // values.
214 template <typename U = std::remove_cv_t<ValueT>>
215 [[nodiscard]] ValueT lookup_or(const_arg_type_t<KeyT> Val,
216 U &&Default) const {
217 if (const BucketT *Bucket = doFind(Val))
218 return Bucket->getSecond();
219 return Default;
220 }
221
222 /// at - Return the entry for the specified key, or abort if no such
223 /// entry exists.
224 [[nodiscard]] const ValueT &at(const_arg_type_t<KeyT> Val) const {
225 auto Iter = this->find(std::move(Val));
226 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
227 return Iter->second;
228 }
229
230 // Inserts key,value pair into the map if the key isn't already in the map.
231 // If the key is already in the map, it returns false and doesn't update the
232 // value.
233 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
234 return try_emplace_impl(KV.first, KV.second);
235 }
236
237 // Inserts key,value pair into the map if the key isn't already in the map.
238 // If the key is already in the map, it returns false and doesn't update the
239 // value.
240 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
241 return try_emplace_impl(std::move(KV.first), std::move(KV.second));
242 }
243
244 // Inserts key,value pair into the map if the key isn't already in the map.
245 // The value is constructed in-place if the key is not in the map, otherwise
246 // it is not moved.
247 template <typename... Ts>
248 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
249 return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
250 }
251
252 // Inserts key,value pair into the map if the key isn't already in the map.
253 // The value is constructed in-place if the key is not in the map, otherwise
254 // it is not moved.
255 template <typename... Ts>
256 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
257 return try_emplace_impl(Key, std::forward<Ts>(Args)...);
258 }
259
260 /// Alternate version of insert() which allows a different, and possibly
261 /// less expensive, key type.
262 /// The DenseMapInfo is responsible for supplying methods
263 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
264 /// type used.
265 template <typename LookupKeyT>
266 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
267 const LookupKeyT &Val) {
268 BucketT *TheBucket;
269 if (LookupBucketFor(Val, TheBucket))
270 return {makeIterator(TheBucket), false}; // Already in map.
271
272 // Otherwise, insert the new element.
273 TheBucket = findBucketForInsertion(Val, TheBucket);
274 TheBucket->getFirst() = std::move(KV.first);
275 ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
276 return {makeIterator(TheBucket), true};
277 }
278
279 /// insert - Range insertion of pairs.
280 template <typename InputIt> void insert(InputIt I, InputIt E) {
281 for (; I != E; ++I)
282 insert(*I);
283 }
284
285 /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
286 template <typename Range> void insert_range(Range &&R) {
287 insert(adl_begin(R), adl_end(R));
288 }
289
290 template <typename V>
291 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
292 auto Ret = try_emplace(Key, std::forward<V>(Val));
293 if (!Ret.second)
294 Ret.first->second = std::forward<V>(Val);
295 return Ret;
296 }
297
298 template <typename V>
299 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
300 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
301 if (!Ret.second)
302 Ret.first->second = std::forward<V>(Val);
303 return Ret;
304 }
305
306 template <typename... Ts>
307 std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
308 auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
309 if (!Ret.second)
310 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
311 return Ret;
312 }
313
314 template <typename... Ts>
315 std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
316 auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
317 if (!Ret.second)
318 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
319 return Ret;
320 }
321
322 bool erase(const KeyT &Val) {
323 BucketT *TheBucket = doFind(Val);
324 if (!TheBucket)
325 return false; // not in map.
326
327 TheBucket->getSecond().~ValueT();
328 TheBucket->getFirst() = KeyInfoT::getTombstoneKey();
329 decrementNumEntries();
330 incrementNumTombstones();
331 return true;
332 }
334 BucketT *TheBucket = &*I;
335 TheBucket->getSecond().~ValueT();
336 TheBucket->getFirst() = KeyInfoT::getTombstoneKey();
337 decrementNumEntries();
338 incrementNumTombstones();
339 }
340
  /// Return a reference to the value for \p Key, default-constructing and
  /// inserting one when the key is absent (like std::map::operator[]).
  ValueT &operator[](const KeyT &Key) {
    return lookupOrInsertIntoBucket(Key).first->second;
  }

  ValueT &operator[](KeyT &&Key) {
    return lookupOrInsertIntoBucket(std::move(Key)).first->second;
  }
348
  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  [[nodiscard]] const void *getPointerIntoBucketsArray() const {
    return getBuckets();
  }
362
protected:
  DenseMapBase() = default;

  /// Run destructors for every constructed key and value, leaving the bucket
  /// memory allocated but uninitialized. Callers must re-initialize or free
  /// the buckets afterwards.
  void destroyAll() {
    // No need to iterate through the buckets if both KeyT and ValueT are
    // trivially destructible.
    if constexpr (std::is_trivially_destructible_v<KeyT> &&
                  std::is_trivially_destructible_v<ValueT>)
      return;

    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    for (BucketT &B : buckets()) {
      // Only live buckets hold a constructed value; every bucket holds a
      // constructed key (empty and tombstone sentinels included).
      if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
        B.getSecond().~ValueT();
      B.getFirst().~KeyT();
    }
  }
385
  /// Reset counters and placement-new the empty key into every bucket.
  /// Assumes bucket memory is allocated but holds no constructed keys.
  void initEmpty() {
    static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
                  "Must pass the derived type to this template!");
    setNumEntries(0);
    setNumTombstones(0);

    // Power-of-two bucket counts let probing use '&' instead of '%'.
    assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    for (BucketT &B : buckets())
      ::new (&B.getFirst()) KeyT(EmptyKey);
  }
398
399 /// Returns the number of buckets to allocate to ensure that the DenseMap can
400 /// accommodate \p NumEntries without need to grow().
401 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
402 // Ensure that "NumEntries * 4 < NumBuckets * 3"
403 if (NumEntries == 0)
404 return 0;
405 // +1 is required because of the strict inequality.
406 // For example, if NumEntries is 48, we need to return 128.
407 return NextPowerOf2(NumEntries * 4 / 3 + 1);
408 }
409
411 initEmpty();
412
413 // Insert all the old elements.
414 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
415 const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
416 for (BucketT &B : OldBuckets) {
417 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
418 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
419 // Insert the key/value into the new table.
420 BucketT *DestBucket;
421 bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
422 (void)FoundVal; // silence warning.
423 assert(!FoundVal && "Key already in new map?");
424 DestBucket->getFirst() = std::move(B.getFirst());
425 ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
426 incrementNumEntries();
427
428 // Free the value.
429 B.getSecond().~ValueT();
430 }
431 B.getFirst().~KeyT();
432 }
433 }
434
  /// Replace this map's contents with a copy of \p other, reallocating to the
  /// same bucket count. Tombstones are copied as-is, so the layouts match.
  void copyFrom(const DerivedT &other) {
    this->destroyAll();
    derived().deallocateBuckets();
    setNumEntries(0);
    setNumTombstones(0);
    if (!derived().allocateBuckets(other.getNumBuckets())) {
      // The bucket list is empty. No work to do.
      return;
    }

    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    BucketT *Buckets = getBuckets();
    const BucketT *OtherBuckets = other.getBuckets();
    const size_t NumBuckets = getNumBuckets();
    if constexpr (std::is_trivially_copyable_v<KeyT> &&
                  std::is_trivially_copyable_v<ValueT>) {
      // Trivially copyable buckets can be bulk-copied, sentinels and all.
      memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
             NumBuckets * sizeof(BucketT));
    } else {
      const KeyT EmptyKey = KeyInfoT::getEmptyKey();
      const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
      for (size_t I = 0; I < NumBuckets; ++I) {
        // Copy-construct every key; only live buckets also carry a value.
        ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
        if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
          ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
      }
    }
  }
469
private:
  // CRTP accessors: the derived class owns the actual storage and counters.
  DerivedT &derived() { return *static_cast<DerivedT *>(this); }
  const DerivedT &derived() const {
    return *static_cast<const DerivedT *>(this);
  }
475
476 template <typename KeyArgT, typename... Ts>
477 std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
478 Ts &&...Args) {
479 BucketT *TheBucket = nullptr;
480 if (LookupBucketFor(Key, TheBucket))
481 return {TheBucket, false}; // Already in the map.
482
483 // Otherwise, insert the new element.
484 TheBucket = findBucketForInsertion(Key, TheBucket);
485 TheBucket->getFirst() = std::forward<KeyArgT>(Key);
486 ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
487 return {TheBucket, true};
488 }
489
  /// Shared implementation for insert()/try_emplace(): wrap the bucket
  /// returned by lookupOrInsertIntoBucket in an iterator.
  template <typename KeyArgT, typename... Ts>
  std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
    auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
        std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
    return {makeIterator(Bucket), Inserted};
  }

  iterator makeIterator(BucketT *TheBucket) {
    return iterator::makeIterator(TheBucket, buckets(), *this);
  }

  const_iterator makeConstIterator(const BucketT *TheBucket) const {
    return const_iterator::makeIterator(TheBucket, buckets(), *this);
  }

  // Counter plumbing: storage lives in the derived class.
  unsigned getNumEntries() const { return derived().getNumEntries(); }

  void setNumEntries(unsigned Num) { derived().setNumEntries(Num); }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const { return derived().getNumTombstones(); }

  void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const { return derived().getBuckets(); }

  BucketT *getBuckets() { return derived().getBuckets(); }

  unsigned getNumBuckets() const { return derived().getNumBuckets(); }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  // Whole bucket array (live, empty, and tombstone buckets alike) as a range.
  iterator_range<BucketT *> buckets() {
    return llvm::make_range(getBuckets(), getBucketsEnd());
  }

  iterator_range<const BucketT *> buckets() const {
    return llvm::make_range(getBuckets(), getBucketsEnd());
  }

  void grow(unsigned AtLeast) { derived().grow(AtLeast); }
542
543 template <typename LookupKeyT>
544 BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
545 BucketT *TheBucket) {
547
548 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
549 // the buckets are empty (meaning that many are filled with tombstones),
550 // grow the table.
551 //
552 // The later case is tricky. For example, if we had one empty bucket with
553 // tons of tombstones, failing lookups (e.g. for insertion) would have to
554 // probe almost the entire table until it found the empty bucket. If the
555 // table completely filled with tombstones, no lookup would ever succeed,
556 // causing infinite loops in lookup.
557 unsigned NewNumEntries = getNumEntries() + 1;
558 unsigned NumBuckets = getNumBuckets();
559 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
560 this->grow(NumBuckets * 2);
561 LookupBucketFor(Lookup, TheBucket);
562 } else if (LLVM_UNLIKELY(NumBuckets -
563 (NewNumEntries + getNumTombstones()) <=
564 NumBuckets / 8)) {
565 this->grow(NumBuckets);
566 LookupBucketFor(Lookup, TheBucket);
567 }
568 assert(TheBucket);
569
570 // Only update the state after we've grown our bucket space appropriately
571 // so that when growing buckets we have self-consistent entry count.
572 incrementNumEntries();
573
574 // If we are writing over a tombstone, remember this.
575 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
576 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
577 decrementNumTombstones();
578
579 return TheBucket;
580 }
581
  /// Core lookup: quadratic probing from the key's hash. Returns the bucket
  /// holding \p Val, or nullptr when not present. Terminates because grow()
  /// guarantees at least one empty (non-tombstone) bucket.
  template <typename LookupKeyT>
  const BucketT *doFind(const LookupKeyT &Val) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();
    if (NumBuckets == 0)
      return nullptr;

    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    // NumBuckets is a power of two, so '&' performs the modulo.
    unsigned BucketNo = KeyInfoT::getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *Bucket = BucketsPtr + BucketNo;
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
        return Bucket;
      // An empty bucket ends the probe chain: the key cannot be further on.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
        return nullptr;

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= NumBuckets - 1;
    }
  }

  /// Non-const overload forwarding to the const implementation.
  template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
    return const_cast<BucketT *>(
        static_cast<const DenseMapBase *>(this)->doFind(Val));
  }
610
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so '&' performs the modulo.
    unsigned BucketNo = KeyInfoT::getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
664
public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  [[nodiscard]] size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};
674
675/// Equality comparison for DenseMap.
676///
677/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
678/// is also in RHS, and that no additional pairs are in RHS.
679/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
680/// complexity is linear, worst case is O(N^2) (if every hash collides).
681template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
682 typename BucketT>
683[[nodiscard]] bool
686 if (LHS.size() != RHS.size())
687 return false;
688
689 for (auto &KV : LHS) {
690 auto I = RHS.find(KV.first);
691 if (I == RHS.end() || I->second != KV.second)
692 return false;
693 }
694
695 return true;
696}
697
698/// Inequality comparison for DenseMap.
699///
700/// Equivalent to !(LHS == RHS). See operator== for performance notes.
701template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
702 typename BucketT>
703[[nodiscard]] bool
708
709template <typename KeyT, typename ValueT,
710 typename KeyInfoT = DenseMapInfo<KeyT>,
712class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
713 KeyT, ValueT, KeyInfoT, BucketT> {
714 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
715
716 // Lift some types from the dependent base class into this class for
717 // simplicity of referring to them.
719
720 BucketT *Buckets;
721 unsigned NumEntries;
722 unsigned NumTombstones;
723 unsigned NumBuckets;
724
725public:
726 /// Create a DenseMap with an optional \p NumElementsToReserve to guarantee
727 /// that this number of elements can be inserted in the map without grow().
728 explicit DenseMap(unsigned NumElementsToReserve = 0) {
729 init(NumElementsToReserve);
730 }
731
732 DenseMap(const DenseMap &other) : BaseT() {
733 init(0);
734 this->copyFrom(other);
735 }
736
737 DenseMap(DenseMap &&other) : BaseT() {
738 init(0);
739 swap(other);
740 }
741
742 template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
743 init(std::distance(I, E));
744 this->insert(I, E);
745 }
746
747 template <typename RangeT>
750
751 DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
752 : DenseMap(Vals.begin(), Vals.end()) {}
753
755 this->destroyAll();
756 deallocateBuckets();
757 }
758
760 this->incrementEpoch();
761 RHS.incrementEpoch();
762 std::swap(Buckets, RHS.Buckets);
763 std::swap(NumEntries, RHS.NumEntries);
764 std::swap(NumTombstones, RHS.NumTombstones);
765 std::swap(NumBuckets, RHS.NumBuckets);
766 }
767
768 DenseMap &operator=(const DenseMap &other) {
769 if (&other != this)
770 this->copyFrom(other);
771 return *this;
772 }
773
775 this->destroyAll();
776 deallocateBuckets();
777 init(0);
778 swap(other);
779 return *this;
780 }
781
private:
  // Storage hooks consumed by DenseMapBase through CRTP.
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }

  void deallocateBuckets() {
    // When the map is empty this is (nullptr, 0) — presumably
    // deallocate_buffer accepts that like operator delete does; it is
    // called unconditionally from the destructor.
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }
798
799 bool allocateBuckets(unsigned Num) {
800 NumBuckets = Num;
801 if (NumBuckets == 0) {
802 Buckets = nullptr;
803 return false;
804 }
805
806 Buckets = static_cast<BucketT *>(
807 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
808 return true;
809 }
810
811 void init(unsigned InitNumEntries) {
812 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
813 if (allocateBuckets(InitBuckets)) {
814 this->BaseT::initEmpty();
815 } else {
816 NumEntries = 0;
817 NumTombstones = 0;
818 }
819 }
820
  /// Reallocate to at least \p AtLeast buckets (minimum 64, rounded up to a
  /// power of two) and rehash all existing entries into the new table.
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    // NextPowerOf2(AtLeast - 1) keeps an exact power of two unchanged while
    // rounding everything else up.
    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      // Nothing to rehash: just initialize the fresh table.
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(
        llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }
840
841 // Plan how to shrink the bucket table. Return:
842 // - {false, 0} to reuse the existing bucket table
843 // - {true, N} to reallocate a bucket table with N entries
844 std::pair<bool, unsigned> planShrinkAndClear() const {
845 unsigned NewNumBuckets = 0;
846 if (NumEntries)
847 NewNumBuckets = std::max(64u, 1u << (Log2_32_Ceil(NumEntries) + 1));
848 if (NewNumBuckets == NumBuckets)
849 return {false, 0}; // Reuse.
850 return {true, NewNumBuckets}; // Reallocate.
851 }
852};
853
854template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
855 typename KeyInfoT = DenseMapInfo<KeyT>,
858 : public DenseMapBase<
859 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
860 ValueT, KeyInfoT, BucketT> {
861 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
862
863 // Lift some types from the dependent base class into this class for
864 // simplicity of referring to them.
866
867 static_assert(isPowerOf2_64(InlineBuckets),
868 "InlineBuckets must be a power of 2.");
869
870 unsigned Small : 1;
871 unsigned NumEntries : 31;
872 unsigned NumTombstones;
873
874 struct LargeRep {
875 BucketT *Buckets;
876 unsigned NumBuckets;
878 return llvm::make_range(Buckets, Buckets + NumBuckets);
879 }
880 };
881
882 /// A "union" of an inline bucket array and the struct representing
883 /// a large bucket. This union will be discriminated by the 'Small' bit.
884 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
885
886public:
887 explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
888 init(NumElementsToReserve);
889 }
890
891 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
892 init(0);
893 this->copyFrom(other);
894 }
895
896 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
897 init(0);
898 swap(other);
899 }
900
901 template <typename InputIt>
902 SmallDenseMap(const InputIt &I, const InputIt &E) {
903 init(std::distance(I, E));
904 this->insert(I, E);
905 }
906
907 template <typename RangeT>
910
911 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
912 : SmallDenseMap(Vals.begin(), Vals.end()) {}
913
915 this->destroyAll();
916 deallocateBuckets();
917 }
918
920 unsigned TmpNumEntries = RHS.NumEntries;
921 RHS.NumEntries = NumEntries;
922 NumEntries = TmpNumEntries;
923 std::swap(NumTombstones, RHS.NumTombstones);
924
925 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
926 const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
927 if (Small && RHS.Small) {
928 // If we're swapping inline bucket arrays, we have to cope with some of
929 // the tricky bits of DenseMap's storage system: the buckets are not
930 // fully initialized. Thus we swap every key, but we may have
931 // a one-directional move of the value.
932 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
933 BucketT *LHSB = &getInlineBuckets()[i],
934 *RHSB = &RHS.getInlineBuckets()[i];
935 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
936 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
937 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
938 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
939 if (hasLHSValue && hasRHSValue) {
940 // Swap together if we can...
941 std::swap(*LHSB, *RHSB);
942 continue;
943 }
944 // Swap separately and handle any asymmetry.
945 std::swap(LHSB->getFirst(), RHSB->getFirst());
946 if (hasLHSValue) {
947 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
948 LHSB->getSecond().~ValueT();
949 } else if (hasRHSValue) {
950 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
951 RHSB->getSecond().~ValueT();
952 }
953 }
954 return;
955 }
956 if (!Small && !RHS.Small) {
957 std::swap(*getLargeRep(), *RHS.getLargeRep());
958 return;
959 }
960
961 SmallDenseMap &SmallSide = Small ? *this : RHS;
962 SmallDenseMap &LargeSide = Small ? RHS : *this;
963
964 // First stash the large side's rep and move the small side across.
965 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
966 LargeSide.getLargeRep()->~LargeRep();
967 LargeSide.Small = true;
968 // This is similar to the standard move-from-old-buckets, but the bucket
969 // count hasn't actually rotated in this case. So we have to carefully
970 // move construct the keys and values into their new locations, but there
971 // is no need to re-hash things.
972 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
973 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
974 *OldB = &SmallSide.getInlineBuckets()[i];
975 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
976 OldB->getFirst().~KeyT();
977 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
978 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
979 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
980 OldB->getSecond().~ValueT();
981 }
982 }
983
984 // The hard part of moving the small buckets across is done, just move
985 // the TmpRep into its new home.
986 SmallSide.Small = false;
987 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
988 }
989
991 if (&other != this)
992 this->copyFrom(other);
993 return *this;
994 }
995
997 this->destroyAll();
998 deallocateBuckets();
999 init(0);
1000 swap(other);
1001 return *this;
1002 }
1003
1004private:
1005 unsigned getNumEntries() const { return NumEntries; }
1006
1007 void setNumEntries(unsigned Num) {
1008 // NumEntries is hardcoded to be 31 bits wide.
1009 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1010 NumEntries = Num;
1011 }
1012
1013 unsigned getNumTombstones() const { return NumTombstones; }
1014
1015 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1016
1017 const BucketT *getInlineBuckets() const {
1018 assert(Small);
1019 // Note that this cast does not violate aliasing rules as we assert that
1020 // the memory's dynamic type is the small, inline bucket buffer, and the
1021 // 'storage' is a POD containing a char buffer.
1022 return reinterpret_cast<const BucketT *>(&storage);
1023 }
1024
1025 BucketT *getInlineBuckets() {
1026 return const_cast<BucketT *>(
1027 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1028 }
1029
1030 const LargeRep *getLargeRep() const {
1031 assert(!Small);
1032 // Note, same rule about aliasing as with getInlineBuckets.
1033 return reinterpret_cast<const LargeRep *>(&storage);
1034 }
1035
1036 LargeRep *getLargeRep() {
1037 return const_cast<LargeRep *>(
1038 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1039 }
1040
1041 const BucketT *getBuckets() const {
1042 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1043 }
1044
1045 BucketT *getBuckets() {
1046 return const_cast<BucketT *>(
1047 const_cast<const SmallDenseMap *>(this)->getBuckets());
1048 }
1049
1050 unsigned getNumBuckets() const {
1051 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1052 }
1053
1054 iterator_range<BucketT *> inlineBuckets() {
1055 BucketT *Begin = getInlineBuckets();
1056 return llvm::make_range(Begin, Begin + InlineBuckets);
1057 }
1058
1059 void deallocateBuckets() {
1060 if (Small)
1061 return;
1062
1063 deallocate_buffer(getLargeRep()->Buckets,
1064 sizeof(BucketT) * getLargeRep()->NumBuckets,
1065 alignof(BucketT));
1066 getLargeRep()->~LargeRep();
1067 }
1068
1069 bool allocateBuckets(unsigned Num) {
1070 if (Num <= InlineBuckets) {
1071 Small = true;
1072 } else {
1073 Small = false;
1074 BucketT *NewBuckets = static_cast<BucketT *>(
1075 allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT)));
1076 new (getLargeRep()) LargeRep{NewBuckets, Num};
1077 }
1078 return true;
1079 }
1080
1081 void init(unsigned InitNumEntries) {
1082 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
1083 allocateBuckets(InitBuckets);
1084 this->BaseT::initEmpty();
1085 }
1086
1087 void grow(unsigned AtLeast) {
1088 if (AtLeast > InlineBuckets)
1089 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
1090
1091 if (Small) {
1092 // First move the inline buckets into a temporary storage.
1093 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1094 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1095 BucketT *TmpEnd = TmpBegin;
1096
1097 // Loop over the buckets, moving non-empty, non-tombstones into the
1098 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1099 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
1100 const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
1101 for (BucketT &B : inlineBuckets()) {
1102 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1103 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1104 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1105 "Too many inline buckets!");
1106 ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1107 ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1108 ++TmpEnd;
1109 B.getSecond().~ValueT();
1110 }
1111 B.getFirst().~KeyT();
1112 }
1113
1114 // AtLeast == InlineBuckets can happen if there are many tombstones,
1115 // and grow() is used to remove them. Usually we always switch to the
1116 // large rep here.
1117 allocateBuckets(AtLeast);
1118 this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1119 return;
1120 }
1121
1122 LargeRep OldRep = std::move(*getLargeRep());
1123 getLargeRep()->~LargeRep();
1124 allocateBuckets(AtLeast);
1125
1126 this->moveFromOldBuckets(OldRep.buckets());
1127
1128 // Free the old table.
1129 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1130 alignof(BucketT));
1131 }
1132
1133 // Plan how to shrink the bucket table. Return:
1134 // - {false, 0} to reuse the existing bucket table
1135 // - {true, N} to reallocate a bucket table with N entries
1136 std::pair<bool, unsigned> planShrinkAndClear() const {
1137 unsigned NewNumBuckets = 0;
1138 if (!this->empty()) {
1139 NewNumBuckets = 1u << (Log2_32_Ceil(this->size()) + 1);
1140 if (NewNumBuckets > InlineBuckets)
1141 NewNumBuckets = std::max(64u, NewNumBuckets);
1142 }
1143 bool Reuse = Small ? NewNumBuckets <= InlineBuckets
1144 : NewNumBuckets == getLargeRep()->NumBuckets;
1145 if (Reuse)
1146 return {false, 0}; // Reuse.
1147 return {true, NewNumBuckets}; // Reallocate.
1148 }
1149};
1150
1151template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1152 bool IsConst>
1153class DenseMapIterator : DebugEpochBase::HandleBase {
1154 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1155 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1156
1157public:
1159 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1162 using iterator_category = std::forward_iterator_tag;
1163
1164private:
1165 using BucketItTy =
1166 std::conditional_t<shouldReverseIterate<KeyT>(),
1167 std::reverse_iterator<pointer>, pointer>;
1168
1169 BucketItTy Ptr = {};
1170 BucketItTy End = {};
1171
1172 DenseMapIterator(BucketItTy Pos, BucketItTy E, const DebugEpochBase &Epoch)
1173 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1174 assert(isHandleInSync() && "invalid construction!");
1175 }
1176
1177public:
1178 DenseMapIterator() = default;
1179
1180 static DenseMapIterator makeBegin(iterator_range<pointer> Buckets,
1181 bool IsEmpty, const DebugEpochBase &Epoch) {
1182 // When the map is empty, avoid the overhead of advancing/retreating past
1183 // empty buckets.
1184 if (IsEmpty)
1185 return makeEnd(Buckets, Epoch);
1186 auto R = maybeReverse(Buckets);
1187 DenseMapIterator Iter(R.begin(), R.end(), Epoch);
1188 Iter.AdvancePastEmptyBuckets();
1189 return Iter;
1190 }
1191
1192 static DenseMapIterator makeEnd(iterator_range<pointer> Buckets,
1193 const DebugEpochBase &Epoch) {
1194 auto R = maybeReverse(Buckets);
1195 return DenseMapIterator(R.end(), R.end(), Epoch);
1196 }
1197
1198 static DenseMapIterator makeIterator(pointer P,
1200 const DebugEpochBase &Epoch) {
1201 auto R = maybeReverse(Buckets);
1202 constexpr int Offset = shouldReverseIterate<KeyT>() ? 1 : 0;
1203 return DenseMapIterator(BucketItTy(P + Offset), R.end(), Epoch);
1204 }
1205
1206 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1207 // for const iterator destinations so it doesn't end up as a user defined copy
1208 // constructor.
1209 template <bool IsConstSrc,
1210 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1212 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1213 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1214
1215 [[nodiscard]] reference operator*() const {
1216 assert(isHandleInSync() && "invalid iterator access!");
1217 assert(Ptr != End && "dereferencing end() iterator");
1218 return *Ptr;
1219 }
1220 [[nodiscard]] pointer operator->() const { return &operator*(); }
1221
1222 [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS,
1223 const DenseMapIterator &RHS) {
1224 assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) &&
1225 "handle not in sync!");
1226 assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) &&
1227 "handle not in sync!");
1228 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1229 "comparing incomparable iterators!");
1230 return LHS.Ptr == RHS.Ptr;
1231 }
1232
1233 [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS,
1234 const DenseMapIterator &RHS) {
1235 return !(LHS == RHS);
1236 }
1237
1238 inline DenseMapIterator &operator++() { // Preincrement
1239 assert(isHandleInSync() && "invalid iterator access!");
1240 assert(Ptr != End && "incrementing end() iterator");
1241 ++Ptr;
1242 AdvancePastEmptyBuckets();
1243 return *this;
1244 }
1245 DenseMapIterator operator++(int) { // Postincrement
1246 assert(isHandleInSync() && "invalid iterator access!");
1247 DenseMapIterator tmp = *this;
1248 ++*this;
1249 return tmp;
1250 }
1251
1252private:
1253 void AdvancePastEmptyBuckets() {
1254 assert(Ptr <= End);
1255 const KeyT Empty = KeyInfoT::getEmptyKey();
1256 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1257
1258 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1259 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1260 ++Ptr;
1261 }
1262
1263 static auto maybeReverse(iterator_range<pointer> Range) {
1264 if constexpr (shouldReverseIterate<KeyT>())
1265 return reverse(Range);
1266 else
1267 return Range;
1268 }
1269};
1270
1271template <typename KeyT, typename ValueT, typename KeyInfoT>
1272[[nodiscard]] inline size_t
1274 return X.getMemorySize();
1275}
1276
1277} // end namespace llvm
1278
1279#endif // LLVM_ADT_DENSEMAP_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition Compiler.h:336
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
This file defines DenseMapInfo traits for DenseMap.
This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
#define I(x, y, z)
Definition MD5.cpp:58
This file defines counterparts of C library allocation functions defined in the namespace 'std'.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
This file contains some templates that are useful if you are working with the STL at all.
This file contains library features backported from future STL versions.
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
LocallyHashedType DenseMapInfo< LocallyHashedType >::Tombstone
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
Value * RHS
Value * LHS
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
void copyFrom(const DerivedT &other)
Definition DenseMap.h:435
unsigned size_type
Definition DenseMap.h:69
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:248
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition DenseMap.h:240
bool erase(const KeyT &Val)
Definition DenseMap.h:322
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition DenseMap.h:266
DenseMapBase()=default
const_iterator find_as(const LookupKeyT &Val) const
Definition DenseMap.h:197
const_iterator end() const
Definition DenseMap.h:87
void moveFromOldBuckets(iterator_range< BucketT * > OldBuckets)
Definition DenseMap.h:410
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition DenseMap.h:191
unsigned size() const
Definition DenseMap.h:110
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition DenseMap.h:181
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > emplace_or_assign(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:307
void insert(InputIt I, InputIt E)
insert - Range insertion of pairs.
Definition DenseMap.h:280
iterator begin()
Definition DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
Definition DenseMap.h:224
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition DenseMap.h:352
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:256
const_iterator begin() const
Definition DenseMap.h:84
std::pair< iterator, bool > emplace_or_assign(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:315
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:286
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition DenseMap.h:359
std::pair< iterator, bool > insert_or_assign(KeyT &&Key, V &&Val)
Definition DenseMap.h:299
ValueT lookup_or(const_arg_type_t< KeyT > Val, U &&Default) const
Definition DenseMap.h:215
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition DenseMap.h:401
ValueT & operator[](const KeyT &Key)
Definition DenseMap.h:341
BucketT value_type
Definition DenseMap.h:72
auto keys() const
Definition DenseMap.h:101
void shrink_and_clear()
Definition DenseMap.h:157
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:233
void erase(iterator I)
Definition DenseMap.h:333
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:291
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition DenseMap.h:114
ValueT & operator[](KeyT &&Key)
Definition DenseMap.h:345
auto values() const
Definition DenseMap.h:105
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition DenseMap.h:670
std::conditional_t< IsConst, const BucketT, BucketT > value_type
Definition DenseMap.h:1159
static DenseMapIterator makeIterator(pointer P, iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1198
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1233
DenseMapIterator & operator++()
Definition DenseMap.h:1238
pointer operator->() const
Definition DenseMap.h:1220
reference operator*() const
Definition DenseMap.h:1215
static DenseMapIterator makeBegin(iterator_range< pointer > Buckets, bool IsEmpty, const DebugEpochBase &Epoch)
Definition DenseMap.h:1180
DenseMapIterator operator++(int)
Definition DenseMap.h:1245
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition DenseMap.h:1211
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1222
static DenseMapIterator makeEnd(iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1192
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:751
DenseMap(unsigned NumElementsToReserve=0)
Create a DenseMap with an optional NumElementsToReserve to guarantee that this number of elements can...
Definition DenseMap.h:728
DenseMap & operator=(DenseMap &&other)
Definition DenseMap.h:774
DenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:748
DenseMap(const DenseMap &other)
Definition DenseMap.h:732
void swap(DenseMap &RHS)
Definition DenseMap.h:759
DenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:742
DenseMap(DenseMap &&other)
Definition DenseMap.h:737
DenseMap & operator=(const DenseMap &other)
Definition DenseMap.h:768
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:902
void swap(SmallDenseMap &RHS)
Definition DenseMap.h:919
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition DenseMap.h:996
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition DenseMap.h:990
SmallDenseMap(unsigned NumElementsToReserve=0)
Definition DenseMap.h:887
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:911
SmallDenseMap(SmallDenseMap &&other)
Definition DenseMap.h:896
SmallDenseMap(const SmallDenseMap &other)
Definition DenseMap.h:891
SmallDenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:908
A range adaptor for a pair of iterators.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition ADL.h:123
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:477
constexpr auto adl_begin(RangeT &&range) -> decltype(adl_detail::begin_impl(std::forward< RangeT >(range)))
Returns the begin iterator to range using std::begin and function found through Argument-Dependent Lo...
Definition ADL.h:78
BitVector::size_type capacity_in_bytes(const BitVector &X)
Definition BitVector.h:844
bool operator!=(uint64_t V1, const APInt &V2)
Definition APInt.h:2113
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
constexpr auto adl_end(RangeT &&range) -> decltype(adl_detail::end_impl(std::forward< RangeT >(range)))
Returns the end iterator to range using std::end and functions found through Argument-Dependent Looku...
Definition ADL.h:86
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
auto map_range(ContainerTy &&C, FuncTy F)
Definition STLExtras.h:364
LLVM_ABI LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:15
LLVM_ABI void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:27
constexpr bool shouldReverseIterate()
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1867
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
std::conditional_t< std::is_pointer_v< T >, typename add_const_past_pointer< T >::type, const T & > type
Definition type_traits.h:54
const ValueT & getSecond() const
Definition DenseMap.h:51
const KeyT & getFirst() const
Definition DenseMap.h:49