LLVM 17.0.0git
DenseMap.h
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
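// A minimal usage sketch (illustrative only; the key/value types below are
// arbitrary examples, not part of this header):
//
//   llvm::DenseMap<unsigned, int> M;
//   M.insert({1, 10});                  // returns a pair<iterator, bool>
//   M.try_emplace(2, 20);               // constructs the value in place
//   int V = M.lookup(3);                // 0: default value when absent
//   if (auto It = M.find(1); It != M.end())
//     It->second += 5;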
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/EpochTracker.h"
19#include "llvm/Support/AlignOf.h"
20#include "llvm/Support/Compiler.h"
21#include "llvm/Support/MathExtras.h"
22#include "llvm/Support/MemAlloc.h"
23#include "llvm/Support/ReverseIteration.h"
24#include "llvm/Support/type_traits.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstring>
29#include <initializer_list>
30#include <iterator>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37namespace detail {
38
39// We extend a pair to allow users to override the bucket type with their own
40// implementation without requiring two members.
41template <typename KeyT, typename ValueT>
42struct DenseMapPair : public std::pair<KeyT, ValueT> {
43 using std::pair<KeyT, ValueT>::pair;
44
45 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
46 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
47 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
48 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
49};
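// Illustrative sketch of a user-supplied bucket type (hypothetical names,
// not part of this header). Any type exposing first/second members plus the
// getFirst()/getSecond() accessors shown above can be passed as the BucketT
// template parameter of the maps below:
//
//   struct MyBucket {
//     unsigned first;
//     float second;
//     unsigned &getFirst() { return first; }
//     const unsigned &getFirst() const { return first; }
//     float &getSecond() { return second; }
//     const float &getSecond() const { return second; }
//   };
//   // llvm::DenseMap<unsigned, float, llvm::DenseMapInfo<unsigned>, MyBucket> M;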
50
51} // end namespace detail
52
53template <typename KeyT, typename ValueT,
54 typename KeyInfoT = DenseMapInfo<KeyT>,
55 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56 bool IsConst = false>
57class DenseMapIterator;
58
59template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60 typename BucketT>
61class DenseMapBase : public DebugEpochBase {
62 template <typename T>
63 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64
65public:
66 using size_type = unsigned;
67 using key_type = KeyT;
68 using mapped_type = ValueT;
69 using value_type = BucketT;
70
71 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72 using const_iterator =
73 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74
75 inline iterator begin() {
76 // When the map is empty, avoid the overhead of advancing/retreating past
77 // empty buckets.
78 if (empty())
79 return end();
80 if (shouldReverseIterate<KeyT>())
81 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82 return makeIterator(getBuckets(), getBucketsEnd(), *this);
83 }
84 inline iterator end() {
85 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86 }
87 inline const_iterator begin() const {
88 if (empty())
89 return end();
90 if (shouldReverseIterate<KeyT>())
91 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93 }
94 inline const_iterator end() const {
95 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96 }
97
98 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
99 unsigned size() const { return getNumEntries(); }
100
101 /// Grow the DenseMap so that it can contain at least \p NumEntries items
102 /// before resizing again.
103 void reserve(size_type NumEntries) {
104 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
105 incrementEpoch();
106 if (NumBuckets > getNumBuckets())
107 grow(NumBuckets);
108 }
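// Usage sketch (illustrative; 'Items' is a hypothetical container): reserve
// up front so a bulk insertion does not trigger repeated grow() calls.
//
//   llvm::DenseMap<unsigned, int> M;
//   M.reserve(Items.size());
//   for (const auto &KV : Items)
//     M.insert(KV);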
109
110 void clear() {
111 incrementEpoch();
112 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
113
114 // If the capacity of the array is huge, and the # elements used is small,
115 // shrink the array.
116 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
117 shrink_and_clear();
118 return;
119 }
120
121 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
122 if (std::is_trivially_destructible<ValueT>::value) {
123 // Use a simpler loop when values don't need destruction.
124 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
125 P->getFirst() = EmptyKey;
126 } else {
127 unsigned NumEntries = getNumEntries();
128 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
129 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
130 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
131 P->getSecond().~ValueT();
132 --NumEntries;
133 }
134 P->getFirst() = EmptyKey;
135 }
136 }
137 assert(NumEntries == 0 && "Node count imbalance!");
138 (void)NumEntries;
139 }
140 setNumEntries(0);
141 setNumTombstones(0);
142 }
143
144 /// Return 1 if the specified key is in the map, 0 otherwise.
145 size_type count(const_arg_type_t<KeyT> Val) const {
146 const BucketT *TheBucket;
147 return LookupBucketFor(Val, TheBucket) ? 1 : 0;
148 }
149
150 iterator find(const_arg_type_t<KeyT> Val) {
151 BucketT *TheBucket;
152 if (LookupBucketFor(Val, TheBucket))
153 return makeIterator(TheBucket,
154 shouldReverseIterate<KeyT>() ? getBuckets()
155 : getBucketsEnd(),
156 *this, true);
157 return end();
158 }
159 const_iterator find(const_arg_type_t<KeyT> Val) const {
160 const BucketT *TheBucket;
161 if (LookupBucketFor(Val, TheBucket))
162 return makeConstIterator(TheBucket,
163 shouldReverseIterate<KeyT>() ? getBuckets()
164 : getBucketsEnd(),
165 *this, true);
166 return end();
167 }
168
169 /// Alternate version of find() which allows a different, and possibly
170 /// less expensive, key type.
171 /// The DenseMapInfo is responsible for supplying methods
172 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
173 /// type used.
174 template<class LookupKeyT>
175 iterator find_as(const LookupKeyT &Val) {
176 BucketT *TheBucket;
177 if (LookupBucketFor(Val, TheBucket))
178 return makeIterator(TheBucket,
179 shouldReverseIterate<KeyT>() ? getBuckets()
180 : getBucketsEnd(),
181 *this, true);
182 return end();
183 }
184 template<class LookupKeyT>
185 const_iterator find_as(const LookupKeyT &Val) const {
186 const BucketT *TheBucket;
187 if (LookupBucketFor(Val, TheBucket))
188 return makeConstIterator(TheBucket,
189 shouldReverseIterate<KeyT>() ? getBuckets()
190 : getBucketsEnd(),
191 *this, true);
192 return end();
193 }
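// Sketch of the extra DenseMapInfo hooks that find_as/insert_as rely on
// (hypothetical 'Symbol' key type and StringRef lookup key; not part of this
// header). The using-declarations keep the normal KeyT overloads visible:
//
//   struct SymbolMapInfo : llvm::DenseMapInfo<Symbol *> {
//     using llvm::DenseMapInfo<Symbol *>::getHashValue;
//     using llvm::DenseMapInfo<Symbol *>::isEqual;
//     static unsigned getHashValue(llvm::StringRef Name);
//     static bool isEqual(llvm::StringRef Name, Symbol *S);
//   };
//   // llvm::DenseMap<Symbol *, unsigned, SymbolMapInfo> M;
//   // auto It = M.find_as(Name);   // Name is a StringRef; no Symbol built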
194
195 /// lookup - Return the entry for the specified key, or a default
196 /// constructed value if no such entry exists.
197 ValueT lookup(const_arg_type_t<KeyT> Val) const {
198 const BucketT *TheBucket;
199 if (LookupBucketFor(Val, TheBucket))
200 return TheBucket->getSecond();
201 return ValueT();
202 }
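// Unlike operator[] below, lookup() never inserts. A short sketch:
//
//   llvm::DenseMap<unsigned, int> M;
//   int A = M.lookup(7);   // returns 0; M stays empty
//   int B = M[7];          // default-constructs M[7]; M.size() == 1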
203
204 // Inserts key,value pair into the map if the key isn't already in the map.
205 // If the key is already in the map, it returns false and doesn't update the
206 // value.
207 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
208 return try_emplace(KV.first, KV.second);
209 }
210
211 // Inserts key,value pair into the map if the key isn't already in the map.
212 // If the key is already in the map, it returns false and doesn't update the
213 // value.
214 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
215 return try_emplace(std::move(KV.first), std::move(KV.second));
216 }
217
218 // Inserts key,value pair into the map if the key isn't already in the map.
219 // The value is constructed in-place if the key is not in the map, otherwise
220 // it is not moved.
221 template <typename... Ts>
222 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
223 BucketT *TheBucket;
224 if (LookupBucketFor(Key, TheBucket))
225 return std::make_pair(makeIterator(TheBucket,
226 shouldReverseIterate<KeyT>()
227 ? getBuckets()
228 : getBucketsEnd(),
229 *this, true),
230 false); // Already in map.
231
232 // Otherwise, insert the new element.
233 TheBucket =
234 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
235 return std::make_pair(makeIterator(TheBucket,
236 shouldReverseIterate<KeyT>()
237 ? getBuckets()
238 : getBucketsEnd(),
239 *this, true),
240 true);
241 }
242
243 // Inserts key,value pair into the map if the key isn't already in the map.
244 // The value is constructed in-place if the key is not in the map, otherwise
245 // it is not moved.
246 template <typename... Ts>
247 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
248 BucketT *TheBucket;
249 if (LookupBucketFor(Key, TheBucket))
250 return std::make_pair(makeIterator(TheBucket,
251 shouldReverseIterate<KeyT>()
252 ? getBuckets()
253 : getBucketsEnd(),
254 *this, true),
255 false); // Already in map.
256
257 // Otherwise, insert the new element.
258 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
259 return std::make_pair(makeIterator(TheBucket,
260 shouldReverseIterate<KeyT>()
261 ? getBuckets()
262 : getBucketsEnd(),
263 *this, true),
264 true);
265 }
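// Usage sketch for try_emplace (illustrative): the bool reports whether an
// insertion happened, and an existing mapping is left untouched.
//
//   llvm::DenseMap<unsigned, std::string> M;
//   auto [It, Inserted] = M.try_emplace(1, "alpha"); // Inserted == true
//   auto R = M.try_emplace(1, "beta");               // R.second == false
//   // M[1] is still "alpha"; no value was constructed from "beta".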
266
267 /// Alternate version of insert() which allows a different, and possibly
268 /// less expensive, key type.
269 /// The DenseMapInfo is responsible for supplying methods
270 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
271 /// type used.
272 template <typename LookupKeyT>
273 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
274 const LookupKeyT &Val) {
275 BucketT *TheBucket;
276 if (LookupBucketFor(Val, TheBucket))
277 return std::make_pair(makeIterator(TheBucket,
278 shouldReverseIterate<KeyT>()
279 ? getBuckets()
280 : getBucketsEnd(),
281 *this, true),
282 false); // Already in map.
283
284 // Otherwise, insert the new element.
285 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
286 std::move(KV.second), Val);
287 return std::make_pair(makeIterator(TheBucket,
288 shouldReverseIterate<KeyT>()
289 ? getBuckets()
290 : getBucketsEnd(),
291 *this, true),
292 true);
293 }
294
295 /// insert - Range insertion of pairs.
296 template<typename InputIt>
297 void insert(InputIt I, InputIt E) {
298 for (; I != E; ++I)
299 insert(*I);
300 }
301
302 bool erase(const KeyT &Val) {
303 BucketT *TheBucket;
304 if (!LookupBucketFor(Val, TheBucket))
305 return false; // not in map.
306
307 TheBucket->getSecond().~ValueT();
308 TheBucket->getFirst() = getTombstoneKey();
309 decrementNumEntries();
310 incrementNumTombstones();
311 return true;
312 }
313 void erase(iterator I) {
314 BucketT *TheBucket = &*I;
315 TheBucket->getSecond().~ValueT();
316 TheBucket->getFirst() = getTombstoneKey();
317 decrementNumEntries();
318 incrementNumTombstones();
319 }
320
321 value_type& FindAndConstruct(const KeyT &Key) {
322 BucketT *TheBucket;
323 if (LookupBucketFor(Key, TheBucket))
324 return *TheBucket;
325
326 return *InsertIntoBucket(TheBucket, Key);
327 }
328
329 ValueT &operator[](const KeyT &Key) {
330 return FindAndConstruct(Key).second;
331 }
332
333 value_type& FindAndConstruct(KeyT &&Key) {
334 BucketT *TheBucket;
335 if (LookupBucketFor(Key, TheBucket))
336 return *TheBucket;
337
338 return *InsertIntoBucket(TheBucket, std::move(Key));
339 }
340
341 ValueT &operator[](KeyT &&Key) {
342 return FindAndConstruct(std::move(Key)).second;
343 }
344
345 /// isPointerIntoBucketsArray - Return true if the specified pointer points
346 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
347 /// value in the DenseMap).
348 bool isPointerIntoBucketsArray(const void *Ptr) const {
349 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
350 }
351
352 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
353 /// array. In conjunction with the previous method, this can be used to
354 /// determine whether an insertion caused the DenseMap to reallocate.
355 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
356
357protected:
358 DenseMapBase() = default;
359
360 void destroyAll() {
361 if (getNumBuckets() == 0) // Nothing to do.
362 return;
363
364 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
365 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
366 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
367 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
368 P->getSecond().~ValueT();
369 P->getFirst().~KeyT();
370 }
371 }
372
373 void initEmpty() {
374 setNumEntries(0);
375 setNumTombstones(0);
376
377 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
378 "# initial buckets must be a power of two!");
379 const KeyT EmptyKey = getEmptyKey();
380 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
381 ::new (&B->getFirst()) KeyT(EmptyKey);
382 }
383
384 /// Returns the number of buckets to allocate to ensure that the DenseMap can
385 /// accommodate \p NumEntries without need to grow().
386 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
387 // Ensure that "NumEntries * 4 < NumBuckets * 3"
388 if (NumEntries == 0)
389 return 0;
390 // +1 is required because the inequality is strict: with NumEntries == 48,
391 // 48 * 4 / 3 == 64 is not enough, so we need 65, which rounds up to 128.
392 return NextPowerOf2(NumEntries * 4 / 3 + 1);
393 }
394
395 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
396 initEmpty();
397
398 // Insert all the old elements.
399 const KeyT EmptyKey = getEmptyKey();
400 const KeyT TombstoneKey = getTombstoneKey();
401 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
402 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
403 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
404 // Insert the key/value into the new table.
405 BucketT *DestBucket;
406 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
407 (void)FoundVal; // silence warning.
408 assert(!FoundVal && "Key already in new map?");
409 DestBucket->getFirst() = std::move(B->getFirst());
410 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
411 incrementNumEntries();
412
413 // Free the value.
414 B->getSecond().~ValueT();
415 }
416 B->getFirst().~KeyT();
417 }
418 }
419
420 template <typename OtherBaseT>
421 void copyFrom(
422 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
423 assert(&other != this);
424 assert(getNumBuckets() == other.getNumBuckets());
425
426 setNumEntries(other.getNumEntries());
427 setNumTombstones(other.getNumTombstones());
428
429 if (std::is_trivially_copyable<KeyT>::value &&
430 std::is_trivially_copyable<ValueT>::value)
431 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
432 getNumBuckets() * sizeof(BucketT));
433 else
434 for (size_t i = 0; i < getNumBuckets(); ++i) {
435 ::new (&getBuckets()[i].getFirst())
436 KeyT(other.getBuckets()[i].getFirst());
437 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
438 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
439 ::new (&getBuckets()[i].getSecond())
440 ValueT(other.getBuckets()[i].getSecond());
441 }
442 }
443
444 static unsigned getHashValue(const KeyT &Val) {
445 return KeyInfoT::getHashValue(Val);
446 }
447
448 template<typename LookupKeyT>
449 static unsigned getHashValue(const LookupKeyT &Val) {
450 return KeyInfoT::getHashValue(Val);
451 }
452
453 static const KeyT getEmptyKey() {
454 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
455 "Must pass the derived type to this template!");
456 return KeyInfoT::getEmptyKey();
457 }
458
459 static const KeyT getTombstoneKey() {
460 return KeyInfoT::getTombstoneKey();
461 }
462
463private:
464 iterator makeIterator(BucketT *P, BucketT *E,
465 DebugEpochBase &Epoch,
466 bool NoAdvance=false) {
467 if (shouldReverseIterate<KeyT>()) {
468 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
469 return iterator(B, E, Epoch, NoAdvance);
470 }
471 return iterator(P, E, Epoch, NoAdvance);
472 }
473
474 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
475 const DebugEpochBase &Epoch,
476 const bool NoAdvance=false) const {
477 if (shouldReverseIterate<KeyT>()) {
478 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
479 return const_iterator(B, E, Epoch, NoAdvance);
480 }
481 return const_iterator(P, E, Epoch, NoAdvance);
482 }
483
484 unsigned getNumEntries() const {
485 return static_cast<const DerivedT *>(this)->getNumEntries();
486 }
487
488 void setNumEntries(unsigned Num) {
489 static_cast<DerivedT *>(this)->setNumEntries(Num);
490 }
491
492 void incrementNumEntries() {
493 setNumEntries(getNumEntries() + 1);
494 }
495
496 void decrementNumEntries() {
497 setNumEntries(getNumEntries() - 1);
498 }
499
500 unsigned getNumTombstones() const {
501 return static_cast<const DerivedT *>(this)->getNumTombstones();
502 }
503
504 void setNumTombstones(unsigned Num) {
505 static_cast<DerivedT *>(this)->setNumTombstones(Num);
506 }
507
508 void incrementNumTombstones() {
509 setNumTombstones(getNumTombstones() + 1);
510 }
511
512 void decrementNumTombstones() {
513 setNumTombstones(getNumTombstones() - 1);
514 }
515
516 const BucketT *getBuckets() const {
517 return static_cast<const DerivedT *>(this)->getBuckets();
518 }
519
520 BucketT *getBuckets() {
521 return static_cast<DerivedT *>(this)->getBuckets();
522 }
523
524 unsigned getNumBuckets() const {
525 return static_cast<const DerivedT *>(this)->getNumBuckets();
526 }
527
528 BucketT *getBucketsEnd() {
529 return getBuckets() + getNumBuckets();
530 }
531
532 const BucketT *getBucketsEnd() const {
533 return getBuckets() + getNumBuckets();
534 }
535
536 void grow(unsigned AtLeast) {
537 static_cast<DerivedT *>(this)->grow(AtLeast);
538 }
539
540 void shrink_and_clear() {
541 static_cast<DerivedT *>(this)->shrink_and_clear();
542 }
543
544 template <typename KeyArg, typename... ValueArgs>
545 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
546 ValueArgs &&... Values) {
547 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
548
549 TheBucket->getFirst() = std::forward<KeyArg>(Key);
550 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
551 return TheBucket;
552 }
553
554 template <typename LookupKeyT>
555 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
556 ValueT &&Value, LookupKeyT &Lookup) {
557 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
558
559 TheBucket->getFirst() = std::move(Key);
560 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
561 return TheBucket;
562 }
563
564 template <typename LookupKeyT>
565 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
566 BucketT *TheBucket) {
567 incrementEpoch();
568
569 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
570 // the buckets are empty (meaning that many are filled with tombstones),
571 // grow the table.
572 //
573 // The latter case is tricky. For example, if we had one empty bucket with
574 // tons of tombstones, failing lookups (e.g. for insertion) would have to
575 // probe almost the entire table until they found the empty bucket. If the
576 // table were completely filled with tombstones, no lookup would ever
577 // succeed, causing infinite loops in lookup.
578 unsigned NewNumEntries = getNumEntries() + 1;
579 unsigned NumBuckets = getNumBuckets();
580 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
581 this->grow(NumBuckets * 2);
582 LookupBucketFor(Lookup, TheBucket);
583 NumBuckets = getNumBuckets();
584 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
585 NumBuckets/8)) {
586 this->grow(NumBuckets);
587 LookupBucketFor(Lookup, TheBucket);
588 }
589 assert(TheBucket);
590
591 // Only update the state after we've grown our bucket space appropriately
592 // so that when growing buckets we have a self-consistent entry count.
593 incrementNumEntries();
594
595 // If we are writing over a tombstone, remember this.
596 const KeyT EmptyKey = getEmptyKey();
597 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
598 decrementNumTombstones();
599
600 return TheBucket;
601 }
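// Worked example of the growth policy above (illustrative): with
// NumBuckets == 64 the load-factor check fires once NewNumEntries reaches 48
// (48 * 4 >= 64 * 3), doubling the table to 128 buckets. Independently, if
// tombstones leave 64 / 8 == 8 or fewer buckets empty, the table is regrown
// at the same size, which rebuilds it and purges the tombstones.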
602
603 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
604 /// FoundBucket. If the bucket contains the key and a value, this returns
605 /// true, otherwise it returns a bucket with an empty marker or tombstone and
606 /// returns false.
607 template<typename LookupKeyT>
608 bool LookupBucketFor(const LookupKeyT &Val,
609 const BucketT *&FoundBucket) const {
610 const BucketT *BucketsPtr = getBuckets();
611 const unsigned NumBuckets = getNumBuckets();
612
613 if (NumBuckets == 0) {
614 FoundBucket = nullptr;
615 return false;
616 }
617
618 // FoundTombstone - Keep track of whether we find a tombstone while probing.
619 const BucketT *FoundTombstone = nullptr;
620 const KeyT EmptyKey = getEmptyKey();
621 const KeyT TombstoneKey = getTombstoneKey();
622 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
623 !KeyInfoT::isEqual(Val, TombstoneKey) &&
624 "Empty/Tombstone value shouldn't be inserted into map!");
625
626 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
627 unsigned ProbeAmt = 1;
628 while (true) {
629 const BucketT *ThisBucket = BucketsPtr + BucketNo;
630 // Found Val's bucket? If so, return it.
631 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
632 FoundBucket = ThisBucket;
633 return true;
634 }
635
636 // If we found an empty bucket, the key doesn't exist in the map; report
637 // the bucket an insertion should use.
638 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
639 // If we've already seen a tombstone while probing, fill it in instead
640 // of the empty bucket we eventually probed to.
641 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
642 return false;
643 }
644
645 // If this is a tombstone, remember it. If Val ends up not in the map, we
646 // prefer to return it rather than something that would require more probing.
647 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
648 !FoundTombstone)
649 FoundTombstone = ThisBucket; // Remember the first tombstone found.
650
651 // Otherwise, it's a hash collision or a tombstone, continue quadratic
652 // probing.
653 BucketNo += ProbeAmt++;
654 BucketNo &= (NumBuckets-1);
655 }
656 }
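// Probe-sequence sketch (illustrative): with NumBuckets == 8 and initial
// index H, the loop above visits H, H+1, H+3, H+6, H+10, ... (mod 8); the
// offsets grow by 1, 2, 3, 4, ... per step (triangular numbers), which
// touches every bucket of a power-of-two table before any index repeats.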
657
658 template <typename LookupKeyT>
659 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
660 const BucketT *ConstFoundBucket;
661 bool Result = const_cast<const DenseMapBase *>(this)
662 ->LookupBucketFor(Val, ConstFoundBucket);
663 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
664 return Result;
665 }
666
667public:
668 /// Return the approximate size (in bytes) of the actual map.
669 /// This is just the raw memory used by DenseMap.
670 /// If entries are pointers to objects, the size of the referenced objects
671 /// are not included.
672 size_t getMemorySize() const {
673 return getNumBuckets() * sizeof(BucketT);
674 }
675};
676
677/// Equality comparison for DenseMap.
678///
679/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
680/// is also in RHS, and that no additional pairs are in RHS.
681/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
682/// complexity is linear; the worst case is O(N^2) (if every hash collides).
683template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
684 typename BucketT>
685bool operator==(
686 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
687 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
688 if (LHS.size() != RHS.size())
689 return false;
690
691 for (auto &KV : LHS) {
692 auto I = RHS.find(KV.first);
693 if (I == RHS.end() || I->second != KV.second)
694 return false;
695 }
696
697 return true;
698}
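// Usage sketch (illustrative):
//
//   llvm::DenseMap<unsigned, int> A, B;
//   A.try_emplace(1, 10);
//   B.try_emplace(1, 10);
//   bool Same = (A == B);   // true: same keys map to equal values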
699
700/// Inequality comparison for DenseMap.
701///
702/// Equivalent to !(LHS == RHS). See operator== for performance notes.
703template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
704 typename BucketT>
705bool operator!=(
706 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
707 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
708 return !(LHS == RHS);
709}
710
711template <typename KeyT, typename ValueT,
712 typename KeyInfoT = DenseMapInfo<KeyT>,
713 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
714class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
715 KeyT, ValueT, KeyInfoT, BucketT> {
716 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
717
718 // Lift some types from the dependent base class into this class for
719 // simplicity of referring to them.
720 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
721
722 BucketT *Buckets;
723 unsigned NumEntries;
724 unsigned NumTombstones;
725 unsigned NumBuckets;
726
727public:
728 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
729 /// this number of elements can be inserted into the map without a grow().
730 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
731
732 DenseMap(const DenseMap &other) : BaseT() {
733 init(0);
734 copyFrom(other);
735 }
736
737 DenseMap(DenseMap &&other) : BaseT() {
738 init(0);
739 swap(other);
740 }
741
742 template<typename InputIt>
743 DenseMap(const InputIt &I, const InputIt &E) {
744 init(std::distance(I, E));
745 this->insert(I, E);
746 }
747
748 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
749 init(Vals.size());
750 this->insert(Vals.begin(), Vals.end());
751 }
752
753 ~DenseMap() {
754 this->destroyAll();
755 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
756 }
757
758 void swap(DenseMap& RHS) {
759 this->incrementEpoch();
760 RHS.incrementEpoch();
761 std::swap(Buckets, RHS.Buckets);
762 std::swap(NumEntries, RHS.NumEntries);
763 std::swap(NumTombstones, RHS.NumTombstones);
764 std::swap(NumBuckets, RHS.NumBuckets);
765 }
766
767 DenseMap& operator=(const DenseMap& other) {
768 if (&other != this)
769 copyFrom(other);
770 return *this;
771 }
772
773 DenseMap& operator=(DenseMap &&other) {
774 this->destroyAll();
775 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
776 init(0);
777 swap(other);
778 return *this;
779 }
780
781 void copyFrom(const DenseMap& other) {
782 this->destroyAll();
783 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
784 if (allocateBuckets(other.NumBuckets)) {
785 this->BaseT::copyFrom(other);
786 } else {
787 NumEntries = 0;
788 NumTombstones = 0;
789 }
790 }
791
792 void init(unsigned InitNumEntries) {
793 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
794 if (allocateBuckets(InitBuckets)) {
795 this->BaseT::initEmpty();
796 } else {
797 NumEntries = 0;
798 NumTombstones = 0;
799 }
800 }
801
802 void grow(unsigned AtLeast) {
803 unsigned OldNumBuckets = NumBuckets;
804 BucketT *OldBuckets = Buckets;
805
806 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
807 assert(Buckets);
808 if (!OldBuckets) {
809 this->BaseT::initEmpty();
810 return;
811 }
812
813 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
814
815 // Free the old table.
816 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
817 alignof(BucketT));
818 }
819
820 void shrink_and_clear() {
821 unsigned OldNumBuckets = NumBuckets;
822 unsigned OldNumEntries = NumEntries;
823 this->destroyAll();
824
825 // Reduce the number of buckets.
826 unsigned NewNumBuckets = 0;
827 if (OldNumEntries)
828 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
829 if (NewNumBuckets == NumBuckets) {
830 this->BaseT::initEmpty();
831 return;
832 }
833
834 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
835 alignof(BucketT));
836 init(NewNumBuckets);
837 }
838
839private:
840 unsigned getNumEntries() const {
841 return NumEntries;
842 }
843
844 void setNumEntries(unsigned Num) {
845 NumEntries = Num;
846 }
847
848 unsigned getNumTombstones() const {
849 return NumTombstones;
850 }
851
852 void setNumTombstones(unsigned Num) {
853 NumTombstones = Num;
854 }
855
856 BucketT *getBuckets() const {
857 return Buckets;
858 }
859
860 unsigned getNumBuckets() const {
861 return NumBuckets;
862 }
863
864 bool allocateBuckets(unsigned Num) {
865 NumBuckets = Num;
866 if (NumBuckets == 0) {
867 Buckets = nullptr;
868 return false;
869 }
870
871 Buckets = static_cast<BucketT *>(
872 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
873 return true;
874 }
875};
876
877template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
878 typename KeyInfoT = DenseMapInfo<KeyT>,
879 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
880class SmallDenseMap
881 : public DenseMapBase<
882 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
883 ValueT, KeyInfoT, BucketT> {
884 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
885
886 // Lift some types from the dependent base class into this class for
887 // simplicity of referring to them.
888 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
889
890 static_assert(isPowerOf2_64(InlineBuckets),
891 "InlineBuckets must be a power of 2.");
892
893 unsigned Small : 1;
894 unsigned NumEntries : 31;
895 unsigned NumTombstones;
896
897 struct LargeRep {
898 BucketT *Buckets;
899 unsigned NumBuckets;
900 };
901
902 /// A "union" of an inline bucket array and the struct representing
903 /// a large bucket. This union will be discriminated by the 'Small' bit.
904 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
905
906public:
907 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
908 if (NumInitBuckets > InlineBuckets)
909 NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
910 init(NumInitBuckets);
911 }
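// Usage sketch (illustrative): the map below keeps up to 8 buckets inline
// and only heap-allocates once it has to grow past them.
//
//   llvm::SmallDenseMap<unsigned, int, 8> M;
//   for (unsigned I = 0; I != 4; ++I)
//     M.try_emplace(I, int(I) * 2);   // still using the inline buckets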
912
913 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
914 init(0);
915 copyFrom(other);
916 }
917
918 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
919 init(0);
920 swap(other);
921 }
922
923 template<typename InputIt>
924 SmallDenseMap(const InputIt &I, const InputIt &E) {
925 init(NextPowerOf2(std::distance(I, E)));
926 this->insert(I, E);
927 }
928
929 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
930 : SmallDenseMap(Vals.begin(), Vals.end()) {}
931
932 ~SmallDenseMap() {
933 this->destroyAll();
934 deallocateBuckets();
935 }
936
937 void swap(SmallDenseMap& RHS) {
938 unsigned TmpNumEntries = RHS.NumEntries;
939 RHS.NumEntries = NumEntries;
940 NumEntries = TmpNumEntries;
941 std::swap(NumTombstones, RHS.NumTombstones);
942
943 const KeyT EmptyKey = this->getEmptyKey();
944 const KeyT TombstoneKey = this->getTombstoneKey();
945 if (Small && RHS.Small) {
946 // If we're swapping inline bucket arrays, we have to cope with some of
947 // the tricky bits of DenseMap's storage system: the buckets are not
948 // fully initialized. Thus we swap every key, but we may have
949 // a one-directional move of the value.
950 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
951 BucketT *LHSB = &getInlineBuckets()[i],
952 *RHSB = &RHS.getInlineBuckets()[i];
953 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
954 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
955 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
956 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
957 if (hasLHSValue && hasRHSValue) {
958 // Swap together if we can...
959 std::swap(*LHSB, *RHSB);
960 continue;
961 }
962 // Swap separately and handle any asymmetry.
963 std::swap(LHSB->getFirst(), RHSB->getFirst());
964 if (hasLHSValue) {
965 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
966 LHSB->getSecond().~ValueT();
967 } else if (hasRHSValue) {
968 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
969 RHSB->getSecond().~ValueT();
970 }
971 }
972 return;
973 }
974 if (!Small && !RHS.Small) {
975 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
976 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
977 return;
978 }
979
980 SmallDenseMap &SmallSide = Small ? *this : RHS;
981 SmallDenseMap &LargeSide = Small ? RHS : *this;
982
983 // First stash the large side's rep and move the small side across.
984 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
985 LargeSide.getLargeRep()->~LargeRep();
986 LargeSide.Small = true;
987 // This is similar to the standard move-from-old-buckets, but the bucket
988 // count hasn't actually rotated in this case. So we have to carefully
989 // move construct the keys and values into their new locations, but there
990 // is no need to re-hash things.
991 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
992 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
993 *OldB = &SmallSide.getInlineBuckets()[i];
994 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
995 OldB->getFirst().~KeyT();
996 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
997 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
998 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
999 OldB->getSecond().~ValueT();
1000 }
1001 }
1002
1003 // The hard part of moving the small buckets across is done, just move
1004 // the TmpRep into its new home.
1005 SmallSide.Small = false;
1006 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1007 }
1008
1009 SmallDenseMap& operator=(const SmallDenseMap& other) {
1010 if (&other != this)
1011 copyFrom(other);
1012 return *this;
1013 }
1014
1015 SmallDenseMap& operator=(SmallDenseMap &&other) {
1016 this->destroyAll();
1017 deallocateBuckets();
1018 init(0);
1019 swap(other);
1020 return *this;
1021 }
1022
1023 void copyFrom(const SmallDenseMap& other) {
1024 this->destroyAll();
1025 deallocateBuckets();
1026 Small = true;
1027 if (other.getNumBuckets() > InlineBuckets) {
1028 Small = false;
1029 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1030 }
1031 this->BaseT::copyFrom(other);
1032 }
1033
1034 void init(unsigned InitBuckets) {
1035 Small = true;
1036 if (InitBuckets > InlineBuckets) {
1037 Small = false;
1038 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1039 }
1040 this->BaseT::initEmpty();
1041 }
1042
1043 void grow(unsigned AtLeast) {
1044 if (AtLeast > InlineBuckets)
1045 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1046
1047 if (Small) {
1048 // First move the inline buckets into a temporary storage.
1049 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1050 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1051 BucketT *TmpEnd = TmpBegin;
1052
1053 // Loop over the buckets, moving non-empty, non-tombstones into the
1054 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1055 const KeyT EmptyKey = this->getEmptyKey();
1056 const KeyT TombstoneKey = this->getTombstoneKey();
1057 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1058 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1059 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1060 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1061 "Too many inline buckets!");
1062 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1063 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1064 ++TmpEnd;
1065 P->getSecond().~ValueT();
1066 }
1067 P->getFirst().~KeyT();
1068 }
1069
1070 // AtLeast == InlineBuckets can happen if there are many tombstones,
1071 // and grow() is used to remove them; in that case we stay small. In the
1072 // usual case we switch to the large rep here.
1073 if (AtLeast > InlineBuckets) {
1074 Small = false;
1075 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1076 }
1077 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1078 return;
1079 }
1080
1081 LargeRep OldRep = std::move(*getLargeRep());
1082 getLargeRep()->~LargeRep();
1083 if (AtLeast <= InlineBuckets) {
1084 Small = true;
1085 } else {
1086 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1087 }
1088
1089 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1090
1091 // Free the old table.
1092 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1093 alignof(BucketT));
1094 }
1095
1096 void shrink_and_clear() {
1097 unsigned OldSize = this->size();
1098 this->destroyAll();
1099
1100 // Reduce the number of buckets.
1101 unsigned NewNumBuckets = 0;
1102 if (OldSize) {
1103 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1104 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1105 NewNumBuckets = 64;
1106 }
1107 if ((Small && NewNumBuckets <= InlineBuckets) ||
1108 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1109 this->BaseT::initEmpty();
1110 return;
1111 }
1112
1113 deallocateBuckets();
1114 init(NewNumBuckets);
1115 }
1116
1117private:
1118 unsigned getNumEntries() const {
1119 return NumEntries;
1120 }
1121
1122 void setNumEntries(unsigned Num) {
1123 // NumEntries is hardcoded to be 31 bits wide.
1124 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1125 NumEntries = Num;
1126 }
1127
1128 unsigned getNumTombstones() const {
1129 return NumTombstones;
1130 }
1131
1132 void setNumTombstones(unsigned Num) {
1133 NumTombstones = Num;
1134 }
1135
1136 const BucketT *getInlineBuckets() const {
1137 assert(Small);
1138 // Note that this cast does not violate aliasing rules as we assert that
1139 // the memory's dynamic type is the small, inline bucket buffer, and the
1140 // 'storage' is a POD containing a char buffer.
1141 return reinterpret_cast<const BucketT *>(&storage);
1142 }
1143
1144 BucketT *getInlineBuckets() {
1145 return const_cast<BucketT *>(
1146 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1147 }
1148
1149 const LargeRep *getLargeRep() const {
1150 assert(!Small);
1151 // Note, same rule about aliasing as with getInlineBuckets.
1152 return reinterpret_cast<const LargeRep *>(&storage);
1153 }
1154
1155 LargeRep *getLargeRep() {
1156 return const_cast<LargeRep *>(
1157 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1158 }
1159
1160 const BucketT *getBuckets() const {
1161 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1162 }
1163
1164 BucketT *getBuckets() {
1165 return const_cast<BucketT *>(
1166 const_cast<const SmallDenseMap *>(this)->getBuckets());
1167 }
1168
1169 unsigned getNumBuckets() const {
1170 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1171 }
1172
1173 void deallocateBuckets() {
1174 if (Small)
1175 return;
1176
1177 deallocate_buffer(getLargeRep()->Buckets,
1178 sizeof(BucketT) * getLargeRep()->NumBuckets,
1179 alignof(BucketT));
1180 getLargeRep()->~LargeRep();
1181 }
1182
1183 LargeRep allocateBuckets(unsigned Num) {
1184 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1185 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1186 sizeof(BucketT) * Num, alignof(BucketT))),
1187 Num};
1188 return Rep;
1189 }
1190};
1191
1192template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1193 bool IsConst>
1194class DenseMapIterator : DebugEpochBase::HandleBase {
1195 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1196 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1197
1198public:
1199 using difference_type = ptrdiff_t;
1200 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1201 using pointer = value_type *;
1202 using reference = value_type &;
1203 using iterator_category = std::forward_iterator_tag;
1204
1205private:
1206 pointer Ptr = nullptr;
1207 pointer End = nullptr;
1208
1209public:
1210 DenseMapIterator() = default;
1211
1212 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1213 bool NoAdvance = false)
1214 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1215 assert(isHandleInSync() && "invalid construction!");
1216
1217 if (NoAdvance) return;
1218 if (shouldReverseIterate<KeyT>()) {
1219 RetreatPastEmptyBuckets();
1220 return;
1221 }
1222 AdvancePastEmptyBuckets();
1223 }
1224
1225 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1226 // for const iterator destinations so it doesn't end up as a user defined copy
1227 // constructor.
1228 template <bool IsConstSrc,
1229 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1230 DenseMapIterator(
1231 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1232 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1233
1234 reference operator*() const {
1235 assert(isHandleInSync() && "invalid iterator access!");
1236 assert(Ptr != End && "dereferencing end() iterator");
1237 if (shouldReverseIterate<KeyT>())
1238 return Ptr[-1];
1239 return *Ptr;
1240 }
1241 pointer operator->() const {
1242 assert(isHandleInSync() && "invalid iterator access!");
1243 assert(Ptr != End && "dereferencing end() iterator");
1244 if (shouldReverseIterate<KeyT>())
1245 return &(Ptr[-1]);
1246 return Ptr;
1247 }
1248
1249 friend bool operator==(const DenseMapIterator &LHS,
1250 const DenseMapIterator &RHS) {
1251 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1252 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1253 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1254 "comparing incomparable iterators!");
1255 return LHS.Ptr == RHS.Ptr;
1256 }
1257
1258 friend bool operator!=(const DenseMapIterator &LHS,
1259 const DenseMapIterator &RHS) {
1260 return !(LHS == RHS);
1261 }
1262
1263 inline DenseMapIterator& operator++() { // Preincrement
1264 assert(isHandleInSync() && "invalid iterator access!");
1265 assert(Ptr != End && "incrementing end() iterator");
1266 if (shouldReverseIterate<KeyT>()) {
1267 --Ptr;
1268 RetreatPastEmptyBuckets();
1269 return *this;
1270 }
1271 ++Ptr;
1272 AdvancePastEmptyBuckets();
1273 return *this;
1274 }
1275 DenseMapIterator operator++(int) { // Postincrement
1276 assert(isHandleInSync() && "invalid iterator access!");
1277 DenseMapIterator tmp = *this; ++*this; return tmp;
1278 }
1279
1280private:
1281 void AdvancePastEmptyBuckets() {
1282 assert(Ptr <= End);
1283 const KeyT Empty = KeyInfoT::getEmptyKey();
1284 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1285
1286 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1287 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1288 ++Ptr;
1289 }
1290
1291 void RetreatPastEmptyBuckets() {
1292 assert(Ptr >= End);
1293 const KeyT Empty = KeyInfoT::getEmptyKey();
1294 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1295
1296 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1297 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1298 --Ptr;
1299 }
1300};
1301
1302template <typename KeyT, typename ValueT, typename KeyInfoT>
1303inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1304 return X.getMemorySize();
1305}
1306
1307} // end namespace llvm
1308
1309#endif // LLVM_ADT_DENSEMAP_H