DenseMap.h
1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the DenseMap class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/EpochTracker.h"
19 #include "llvm/Support/AlignOf.h"
20 #include "llvm/Support/Compiler.h"
21 #include "llvm/Support/MathExtras.h"
22 #include "llvm/Support/ReverseIteration.h"
23 #include "llvm/Support/type_traits.h"
24 #include <algorithm>
25 #include <cassert>
26 #include <cstddef>
27 #include <cstring>
28 #include <iterator>
29 #include <new>
30 #include <type_traits>
31 #include <utility>
32 
33 namespace llvm {
34 
35 namespace detail {
36 
37 // We extend a pair to allow users to override the bucket type with their own
38 // implementation without requiring two members.
39 template <typename KeyT, typename ValueT>
40 struct DenseMapPair : public std::pair<KeyT, ValueT> {
41  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
42  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
43  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
44  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
45 };
46 
47 } // end namespace detail
48 
49 template <typename KeyT, typename ValueT,
50  typename KeyInfoT = DenseMapInfo<KeyT>,
51  typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
52  bool IsConst = false>
53 class DenseMapIterator;
54 
55 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
56  typename BucketT>
57 class DenseMapBase : public DebugEpochBase {
58  template <typename T>
59  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
60 
61 public:
62  using size_type = unsigned;
63  using key_type = KeyT;
64  using mapped_type = ValueT;
65  using value_type = BucketT;
66 
67  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
68  using const_iterator =
69      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
70 
71  inline iterator begin() {
72  // When the map is empty, avoid the overhead of advancing/retreating past
73  // empty buckets.
74  if (empty())
75  return end();
76  if (shouldReverseIterate<KeyT>())
77  return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
78  return makeIterator(getBuckets(), getBucketsEnd(), *this);
79  }
80  inline iterator end() {
81  return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
82  }
83  inline const_iterator begin() const {
84  if (empty())
85  return end();
86  if (shouldReverseIterate<KeyT>())
87  return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
88  return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
89  }
90  inline const_iterator end() const {
91  return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
92  }
93 
94  LLVM_NODISCARD bool empty() const {
95  return getNumEntries() == 0;
96  }
97  unsigned size() const { return getNumEntries(); }
98 
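// [Editor's note: illustrative usage sketch, not part of the original header;
// it assumes the usual "llvm/ADT/DenseMap.h" and raw_ostream includes.]
// begin()/end() skip empty and tombstone buckets, so a range-for visits only
// live entries; iteration order is unspecified and changes as the table grows.
//
//   llvm::DenseMap<int, int> Squares;
//   Squares[2] = 4;
//   Squares[3] = 9;
//   for (const auto &Entry : Squares)        // Entry is the BucketT pair
//     llvm::errs() << Entry.first << " -> " << Entry.second << "\n";
//   assert(!Squares.empty() && Squares.size() == 2);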
99  /// Grow the densemap so that it can contain at least \p NumEntries items
100  /// before resizing again.
101  void reserve(size_type NumEntries) {
102  auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
103  incrementEpoch();
104  if (NumBuckets > getNumBuckets())
105  grow(NumBuckets);
106  }
107 
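// [Editor's note: illustrative usage sketch, not part of the original header.]
// reserve() sizes the table for the expected number of entries up front, so a
// bulk insertion like the one below performs no intermediate grow() calls.
//
//   llvm::DenseMap<unsigned, unsigned> M;
//   M.reserve(1000);                   // one allocation instead of several
//   for (unsigned i = 0; i != 1000; ++i)
//     M[i] = i * i;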
108  void clear() {
109  incrementEpoch();
110  if (getNumEntries() == 0 && getNumTombstones() == 0) return;
111 
112  // If the capacity of the array is huge, and the # elements used is small,
113  // shrink the array.
114  if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
115  shrink_and_clear();
116  return;
117  }
118 
119  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
120  if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) {
121  // Use a simpler loop when these are trivial types.
122  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
123  P->getFirst() = EmptyKey;
124  } else {
125  unsigned NumEntries = getNumEntries();
126  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
127  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
128  if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
129  P->getSecond().~ValueT();
130  --NumEntries;
131  }
132  P->getFirst() = EmptyKey;
133  }
134  }
135  assert(NumEntries == 0 && "Node count imbalance!");
136  }
137  setNumEntries(0);
138  setNumTombstones(0);
139  }
140 
141  /// Return 1 if the specified key is in the map, 0 otherwise.
142  size_type count(const_arg_type_t<KeyT> Val) const {
143  const BucketT *TheBucket;
144  return LookupBucketFor(Val, TheBucket) ? 1 : 0;
145  }
146 
147  iterator find(const_arg_type_t<KeyT> Val) {
148  BucketT *TheBucket;
149  if (LookupBucketFor(Val, TheBucket))
150  return makeIterator(TheBucket, getBucketsEnd(), *this, true);
151  return end();
152  }
153  const_iterator find(const_arg_type_t<KeyT> Val) const {
154  const BucketT *TheBucket;
155  if (LookupBucketFor(Val, TheBucket))
156  return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
157  return end();
158  }
159 
160  /// Alternate version of find() which allows a different, and possibly
161  /// less expensive, key type.
162  /// The DenseMapInfo is responsible for supplying methods
163  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
164  /// type used.
165  template<class LookupKeyT>
166  iterator find_as(const LookupKeyT &Val) {
167  BucketT *TheBucket;
168  if (LookupBucketFor(Val, TheBucket))
169  return makeIterator(TheBucket, getBucketsEnd(), *this, true);
170  return end();
171  }
172  template<class LookupKeyT>
173  const_iterator find_as(const LookupKeyT &Val) const {
174  const BucketT *TheBucket;
175  if (LookupBucketFor(Val, TheBucket))
176  return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
177  return end();
178  }
179 
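// [Editor's note: illustrative sketch, not part of the original header. The
// 'Widget' and 'WidgetInfo' names are hypothetical and exist only to show the
// extra KeyInfoT overloads that find_as() relies on.]
//
//   struct Widget { int Id; };
//   struct WidgetInfo {
//     static inline Widget getEmptyKey()     { return Widget{-1}; }
//     static inline Widget getTombstoneKey() { return Widget{-2}; }
//     static unsigned getHashValue(const Widget &W) { return unsigned(W.Id); }
//     static bool isEqual(const Widget &A, const Widget &B) { return A.Id == B.Id; }
//     // The overloads below are what make find_as(int) possible:
//     static unsigned getHashValue(int Id) { return unsigned(Id); }
//     static bool isEqual(int Id, const Widget &W) { return Id == W.Id; }
//   };
//   llvm::DenseMap<Widget, int, WidgetInfo> Weights;
//   auto It = Weights.find_as(42);     // no Widget object is constructed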
180  /// lookup - Return the entry for the specified key, or a default
181  /// constructed value if no such entry exists.
182  ValueT lookup(const_arg_type_t<KeyT> Val) const {
183  const BucketT *TheBucket;
184  if (LookupBucketFor(Val, TheBucket))
185  return TheBucket->getSecond();
186  return ValueT();
187  }
188 
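// [Editor's note: illustrative usage sketch, not part of the original header.]
// count(), find() and lookup() are the non-mutating queries: count() answers
// membership, find() returns an iterator to the bucket, and lookup() copies
// the mapped value out (or a default-constructed one when the key is absent).
//
//   llvm::DenseMap<int, llvm::StringRef> Names;
//   Names[7] = "seven";
//   if (Names.count(7)) { /* key 7 is present */ }
//   auto It = Names.find(7);
//   if (It != Names.end())
//     llvm::errs() << It->second << "\n";       // no copy of the mapped value
//   llvm::StringRef Missing = Names.lookup(8);  // "" because 8 is absent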
189  // Inserts key,value pair into the map if the key isn't already in the map.
190  // If the key is already in the map, it returns false and doesn't update the
191  // value.
192  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
193  return try_emplace(KV.first, KV.second);
194  }
195 
196  // Inserts key,value pair into the map if the key isn't already in the map.
197  // If the key is already in the map, it returns false and doesn't update the
198  // value.
199  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
200  return try_emplace(std::move(KV.first), std::move(KV.second));
201  }
202 
203  // Inserts key,value pair into the map if the key isn't already in the map.
204  // The value is constructed in-place if the key is not in the map, otherwise
205  // it is not moved.
206  template <typename... Ts>
207  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
208  BucketT *TheBucket;
209  if (LookupBucketFor(Key, TheBucket))
210  return std::make_pair(
211  makeIterator(TheBucket, getBucketsEnd(), *this, true),
212  false); // Already in map.
213 
214  // Otherwise, insert the new element.
215  TheBucket =
216  InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
217  return std::make_pair(
218  makeIterator(TheBucket, getBucketsEnd(), *this, true),
219  true);
220  }
221 
222  // Inserts key,value pair into the map if the key isn't already in the map.
223  // The value is constructed in-place if the key is not in the map, otherwise
224  // it is not moved.
225  template <typename... Ts>
226  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
227  BucketT *TheBucket;
228  if (LookupBucketFor(Key, TheBucket))
229  return std::make_pair(
230  makeIterator(TheBucket, getBucketsEnd(), *this, true),
231  false); // Already in map.
232 
233  // Otherwise, insert the new element.
234  TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
235  return std::make_pair(
236  makeIterator(TheBucket, getBucketsEnd(), *this, true),
237  true);
238  }
239 
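// [Editor's note: illustrative usage sketch, not part of the original header;
// it assumes <string> is available for the mapped type.]
// insert() and try_emplace() both refuse to overwrite an existing mapping and
// report what happened through the bool of the returned pair; try_emplace()
// additionally avoids constructing the value when the key is already present.
//
//   llvm::DenseMap<int, std::string> Table;
//   auto R1 = Table.try_emplace(1, "one");   // inserted, R1.second == true
//   auto R2 = Table.try_emplace(1, "uno");   // key exists, value untouched
//   assert(!R2.second && R2.first->second == "one");
//   Table.insert(std::make_pair(2, std::string("two")));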
240  /// Alternate version of insert() which allows a different, and possibly
241  /// less expensive, key type.
242  /// The DenseMapInfo is responsible for supplying methods
243  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
244  /// type used.
245  template <typename LookupKeyT>
246  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
247  const LookupKeyT &Val) {
248  BucketT *TheBucket;
249  if (LookupBucketFor(Val, TheBucket))
250  return std::make_pair(
251  makeIterator(TheBucket, getBucketsEnd(), *this, true),
252  false); // Already in map.
253 
254  // Otherwise, insert the new element.
255  TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
256  std::move(KV.second), Val);
257  return std::make_pair(
258  makeIterator(TheBucket, getBucketsEnd(), *this, true),
259  true);
260  }
261 
262  /// insert - Range insertion of pairs.
263  template<typename InputIt>
264  void insert(InputIt I, InputIt E) {
265  for (; I != E; ++I)
266  insert(*I);
267  }
268 
269  bool erase(const KeyT &Val) {
270  BucketT *TheBucket;
271  if (!LookupBucketFor(Val, TheBucket))
272  return false; // not in map.
273 
274  TheBucket->getSecond().~ValueT();
275  TheBucket->getFirst() = getTombstoneKey();
276  decrementNumEntries();
277  incrementNumTombstones();
278  return true;
279  }
280  void erase(iterator I) {
281  BucketT *TheBucket = &*I;
282  TheBucket->getSecond().~ValueT();
283  TheBucket->getFirst() = getTombstoneKey();
284  decrementNumEntries();
285  incrementNumTombstones();
286  }
287 
288  value_type& FindAndConstruct(const KeyT &Key) {
289  BucketT *TheBucket;
290  if (LookupBucketFor(Key, TheBucket))
291  return *TheBucket;
292 
293  return *InsertIntoBucket(TheBucket, Key);
294  }
295 
296  ValueT &operator[](const KeyT &Key) {
297  return FindAndConstruct(Key).second;
298  }
299 
300  value_type& FindAndConstruct(KeyT &&Key) {
301  BucketT *TheBucket;
302  if (LookupBucketFor(Key, TheBucket))
303  return *TheBucket;
304 
305  return *InsertIntoBucket(TheBucket, std::move(Key));
306  }
307 
308  ValueT &operator[](KeyT &&Key) {
309  return FindAndConstruct(std::move(Key)).second;
310  }
311 
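// [Editor's note: illustrative usage sketch, not part of the original header.]
// operator[] default-constructs the mapped value for a missing key (via
// FindAndConstruct above), while erase() destroys the value and marks the
// bucket with the tombstone key instead of shifting other entries around.
//
//   llvm::DenseMap<int, unsigned> Counts;
//   ++Counts[42];           // creates the entry with value 0, then bumps it
//   Counts.erase(42);       // leaves a tombstone; size() drops back to 0
//   assert(Counts.size() == 0);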
312  /// isPointerIntoBucketsArray - Return true if the specified pointer points
313  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
314  /// value in the DenseMap).
315  bool isPointerIntoBucketsArray(const void *Ptr) const {
316  return Ptr >= getBuckets() && Ptr < getBucketsEnd();
317  }
318 
319  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
320  /// array. In conjunction with the previous method, this can be used to
321  /// determine whether an insertion caused the DenseMap to reallocate.
322  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
323 
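// [Editor's note: illustrative usage sketch, not part of the original header.]
// Together the two methods above let callers detect whether an insertion made
// the map reallocate its bucket array:
//
//   llvm::DenseMap<int, int> M;
//   M[1] = 10;
//   const void *Probe = M.getPointerIntoBucketsArray();
//   M[2] = 20;                                     // may trigger a grow()
//   bool Reallocated = !M.isPointerIntoBucketsArray(Probe);
//   (void)Reallocated;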
324 protected:
325  DenseMapBase() = default;
326 
327  void destroyAll() {
328  if (getNumBuckets() == 0) // Nothing to do.
329  return;
330 
331  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
332  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
333  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
334  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
335  P->getSecond().~ValueT();
336  P->getFirst().~KeyT();
337  }
338  }
339 
340  void initEmpty() {
341  setNumEntries(0);
342  setNumTombstones(0);
343 
344  assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
345  "# initial buckets must be a power of two!");
346  const KeyT EmptyKey = getEmptyKey();
347  for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
348  ::new (&B->getFirst()) KeyT(EmptyKey);
349  }
350 
351  /// Returns the number of buckets to allocate to ensure that the DenseMap can
352  /// accommodate \p NumEntries without needing to grow().
353  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
354  // Ensure that "NumEntries * 4 < NumBuckets * 3"
355  if (NumEntries == 0)
356  return 0;
357  // +1 is required because of the strict equality.
358  // For example, if NumEntries is 48, this returns NextPowerOf2(65) == 128.
359  return NextPowerOf2(NumEntries * 4 / 3 + 1);
360  }
361 
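// [Editor's note: worked example, not part of the original header.]
// The formula above keeps the load factor strictly below 3/4. For
// NumEntries == 48: with 64 buckets, 48 * 4 == 192 == 64 * 3, so the strict
// inequality fails; 65 buckets is the true minimum, and NextPowerOf2(65)
// rounds that up to 128.
//
//   llvm::DenseMap<int, int> M;
//   M.reserve(48);   // allocates 128 buckets under the math above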
362  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
363  initEmpty();
364 
365  // Insert all the old elements.
366  const KeyT EmptyKey = getEmptyKey();
367  const KeyT TombstoneKey = getTombstoneKey();
368  for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
369  if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
370  !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
371  // Insert the key/value into the new table.
372  BucketT *DestBucket;
373  bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
374  (void)FoundVal; // silence warning.
375  assert(!FoundVal && "Key already in new map?");
376  DestBucket->getFirst() = std::move(B->getFirst());
377  ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
378  incrementNumEntries();
379 
380  // Free the value.
381  B->getSecond().~ValueT();
382  }
383  B->getFirst().~KeyT();
384  }
385  }
386 
387  template <typename OtherBaseT>
388  void copyFrom(
389      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
390  assert(&other != this);
391  assert(getNumBuckets() == other.getNumBuckets());
392 
393  setNumEntries(other.getNumEntries());
394  setNumTombstones(other.getNumTombstones());
395 
396  if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
397  memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
398  getNumBuckets() * sizeof(BucketT));
399  else
400  for (size_t i = 0; i < getNumBuckets(); ++i) {
401  ::new (&getBuckets()[i].getFirst())
402  KeyT(other.getBuckets()[i].getFirst());
403  if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
404  !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
405  ::new (&getBuckets()[i].getSecond())
406  ValueT(other.getBuckets()[i].getSecond());
407  }
408  }
409 
410  static unsigned getHashValue(const KeyT &Val) {
411  return KeyInfoT::getHashValue(Val);
412  }
413 
414  template<typename LookupKeyT>
415  static unsigned getHashValue(const LookupKeyT &Val) {
416  return KeyInfoT::getHashValue(Val);
417  }
418 
419  static const KeyT getEmptyKey() {
420  static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
421  "Must pass the derived type to this template!");
422  return KeyInfoT::getEmptyKey();
423  }
424 
425  static const KeyT getTombstoneKey() {
426  return KeyInfoT::getTombstoneKey();
427  }
428 
429 private:
430  iterator makeIterator(BucketT *P, BucketT *E,
431  DebugEpochBase &Epoch,
432  bool NoAdvance=false) {
433  if (shouldReverseIterate<KeyT>()) {
434  BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
435  return iterator(B, E, Epoch, NoAdvance);
436  }
437  return iterator(P, E, Epoch, NoAdvance);
438  }
439 
440  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
441  const DebugEpochBase &Epoch,
442  const bool NoAdvance=false) const {
443  if (shouldReverseIterate<KeyT>()) {
444  const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
445  return const_iterator(B, E, Epoch, NoAdvance);
446  }
447  return const_iterator(P, E, Epoch, NoAdvance);
448  }
449 
450  unsigned getNumEntries() const {
451  return static_cast<const DerivedT *>(this)->getNumEntries();
452  }
453 
454  void setNumEntries(unsigned Num) {
455  static_cast<DerivedT *>(this)->setNumEntries(Num);
456  }
457 
458  void incrementNumEntries() {
459  setNumEntries(getNumEntries() + 1);
460  }
461 
462  void decrementNumEntries() {
463  setNumEntries(getNumEntries() - 1);
464  }
465 
466  unsigned getNumTombstones() const {
467  return static_cast<const DerivedT *>(this)->getNumTombstones();
468  }
469 
470  void setNumTombstones(unsigned Num) {
471  static_cast<DerivedT *>(this)->setNumTombstones(Num);
472  }
473 
474  void incrementNumTombstones() {
475  setNumTombstones(getNumTombstones() + 1);
476  }
477 
478  void decrementNumTombstones() {
479  setNumTombstones(getNumTombstones() - 1);
480  }
481 
482  const BucketT *getBuckets() const {
483  return static_cast<const DerivedT *>(this)->getBuckets();
484  }
485 
486  BucketT *getBuckets() {
487  return static_cast<DerivedT *>(this)->getBuckets();
488  }
489 
490  unsigned getNumBuckets() const {
491  return static_cast<const DerivedT *>(this)->getNumBuckets();
492  }
493 
494  BucketT *getBucketsEnd() {
495  return getBuckets() + getNumBuckets();
496  }
497 
498  const BucketT *getBucketsEnd() const {
499  return getBuckets() + getNumBuckets();
500  }
501 
502  void grow(unsigned AtLeast) {
503  static_cast<DerivedT *>(this)->grow(AtLeast);
504  }
505 
506  void shrink_and_clear() {
507  static_cast<DerivedT *>(this)->shrink_and_clear();
508  }
509 
510  template <typename KeyArg, typename... ValueArgs>
511  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
512  ValueArgs &&... Values) {
513  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
514 
515  TheBucket->getFirst() = std::forward<KeyArg>(Key);
516  ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
517  return TheBucket;
518  }
519 
520  template <typename LookupKeyT>
521  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
522  ValueT &&Value, LookupKeyT &Lookup) {
523  TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
524 
525  TheBucket->getFirst() = std::move(Key);
526  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
527  return TheBucket;
528  }
529 
530  template <typename LookupKeyT>
531  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
532  BucketT *TheBucket) {
533  incrementEpoch();
534 
535  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
536  // the buckets are empty (meaning that many are filled with tombstones),
537  // grow the table.
538  //
539  // The latter case is tricky. For example, if we had one empty bucket with
540  // tons of tombstones, failing lookups (e.g. for insertion) would have to
541  // probe almost the entire table until it found the empty bucket. If the
542  // table were completely filled with tombstones, no lookup would ever succeed,
543  // causing infinite loops in lookup.
544  unsigned NewNumEntries = getNumEntries() + 1;
545  unsigned NumBuckets = getNumBuckets();
546  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
547  this->grow(NumBuckets * 2);
548  LookupBucketFor(Lookup, TheBucket);
549  NumBuckets = getNumBuckets();
550  } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
551  NumBuckets/8)) {
552  this->grow(NumBuckets);
553  LookupBucketFor(Lookup, TheBucket);
554  }
555  assert(TheBucket);
556 
557  // Only update the state after we've grown our bucket space appropriately
558  // so that when growing buckets we have self-consistent entry count.
559  incrementNumEntries();
560 
561  // If we are writing over a tombstone, remember this.
562  const KeyT EmptyKey = getEmptyKey();
563  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
564  decrementNumTombstones();
565 
566  return TheBucket;
567  }
568 
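// [Editor's note: worked example, not part of the original header.]
// With 64 buckets, the first branch fires when the entry count is about to
// reach 48 (48 * 4 >= 64 * 3) and doubles the table to 128 buckets. The
// second branch fires when at most 64 / 8 == 8 buckets remain genuinely
// empty; growing to the *same* size then rehashes everything and flushes out
// the tombstones that erase() left behind.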
569  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
570  /// FoundBucket. If the bucket contains the key and a value, this returns
571  /// true, otherwise it returns a bucket with an empty marker or tombstone and
572  /// returns false.
573  template<typename LookupKeyT>
574  bool LookupBucketFor(const LookupKeyT &Val,
575  const BucketT *&FoundBucket) const {
576  const BucketT *BucketsPtr = getBuckets();
577  const unsigned NumBuckets = getNumBuckets();
578 
579  if (NumBuckets == 0) {
580  FoundBucket = nullptr;
581  return false;
582  }
583 
584  // FoundTombstone - Keep track of whether we find a tombstone while probing.
585  const BucketT *FoundTombstone = nullptr;
586  const KeyT EmptyKey = getEmptyKey();
587  const KeyT TombstoneKey = getTombstoneKey();
588  assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
589  !KeyInfoT::isEqual(Val, TombstoneKey) &&
590  "Empty/Tombstone value shouldn't be inserted into map!");
591 
592  unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
593  unsigned ProbeAmt = 1;
594  while (true) {
595  const BucketT *ThisBucket = BucketsPtr + BucketNo;
596  // Found Val's bucket? If so, return it.
597  if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
598  FoundBucket = ThisBucket;
599  return true;
600  }
601 
602  // If we found an empty bucket, the key doesn't exist in the map. Return
603  // this bucket so the caller can insert into it if desired.
604  if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
605  // If we've already seen a tombstone while probing, fill it in instead
606  // of the empty bucket we eventually probed to.
607  FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
608  return false;
609  }
610 
611  // If this is a tombstone, remember it. If Val ends up not in the map, we
612  // prefer to return it rather than something that would require more probing.
613  if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
614  !FoundTombstone)
615  FoundTombstone = ThisBucket; // Remember the first tombstone found.
616 
617  // Otherwise, it's a hash collision or a tombstone, continue quadratic
618  // probing.
619  BucketNo += ProbeAmt++;
620  BucketNo &= (NumBuckets-1);
621  }
622  }
623 
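// [Editor's note: worked example, not part of the original header.]
// Because ProbeAmt grows by one on every collision, the probe sequence visits
// the triangular offsets H, H+1, H+3, H+6, H+10, ... modulo the power-of-two
// bucket count:
//
//   BucketNo(0) = H & (N - 1)
//   BucketNo(k) = (BucketNo(k - 1) + k) & (N - 1)    // k = 1, 2, 3, ...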
624  template <typename LookupKeyT>
625  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
626  const BucketT *ConstFoundBucket;
627  bool Result = const_cast<const DenseMapBase *>(this)
628  ->LookupBucketFor(Val, ConstFoundBucket);
629  FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
630  return Result;
631  }
632 
633 public:
634  /// Return the approximate size (in bytes) of the actual map.
635  /// This is just the raw memory used by DenseMap.
636  /// If entries are pointers to objects, the size of the referenced objects
637  /// are not included.
638  size_t getMemorySize() const {
639  return getNumBuckets() * sizeof(BucketT);
640  }
641 };
642 
643 template <typename KeyT, typename ValueT,
644  typename KeyInfoT = DenseMapInfo<KeyT>,
645  typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
646 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
647  KeyT, ValueT, KeyInfoT, BucketT> {
648  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
649 
650  // Lift some types from the dependent base class into this class for
651  // simplicity of referring to them.
652  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
653 
654  BucketT *Buckets;
655  unsigned NumEntries;
656  unsigned NumTombstones;
657  unsigned NumBuckets;
658 
659 public:
660  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
661  /// this number of elements can be inserted into the map without needing to grow().
662  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
663 
664  DenseMap(const DenseMap &other) : BaseT() {
665  init(0);
666  copyFrom(other);
667  }
668 
669  DenseMap(DenseMap &&other) : BaseT() {
670  init(0);
671  swap(other);
672  }
673 
674  template<typename InputIt>
675  DenseMap(const InputIt &I, const InputIt &E) {
676  init(std::distance(I, E));
677  this->insert(I, E);
678  }
679 
680  ~DenseMap() {
681  this->destroyAll();
682  operator delete(Buckets);
683  }
684 
685  void swap(DenseMap& RHS) {
686  this->incrementEpoch();
687  RHS.incrementEpoch();
688  std::swap(Buckets, RHS.Buckets);
689  std::swap(NumEntries, RHS.NumEntries);
690  std::swap(NumTombstones, RHS.NumTombstones);
691  std::swap(NumBuckets, RHS.NumBuckets);
692  }
693 
694  DenseMap& operator=(const DenseMap& other) {
695  if (&other != this)
696  copyFrom(other);
697  return *this;
698  }
699 
700  DenseMap& operator=(DenseMap &&other) {
701  this->destroyAll();
702  operator delete(Buckets);
703  init(0);
704  swap(other);
705  return *this;
706  }
707 
708  void copyFrom(const DenseMap& other) {
709  this->destroyAll();
710  operator delete(Buckets);
711  if (allocateBuckets(other.NumBuckets)) {
712  this->BaseT::copyFrom(other);
713  } else {
714  NumEntries = 0;
715  NumTombstones = 0;
716  }
717  }
718 
719  void init(unsigned InitNumEntries) {
720  auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
721  if (allocateBuckets(InitBuckets)) {
722  this->BaseT::initEmpty();
723  } else {
724  NumEntries = 0;
725  NumTombstones = 0;
726  }
727  }
728 
729  void grow(unsigned AtLeast) {
730  unsigned OldNumBuckets = NumBuckets;
731  BucketT *OldBuckets = Buckets;
732 
733  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
734  assert(Buckets);
735  if (!OldBuckets) {
736  this->BaseT::initEmpty();
737  return;
738  }
739 
740  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
741 
742  // Free the old table.
743  operator delete(OldBuckets);
744  }
745 
746  void shrink_and_clear() {
747  unsigned OldNumEntries = NumEntries;
748  this->destroyAll();
749 
750  // Reduce the number of buckets.
751  unsigned NewNumBuckets = 0;
752  if (OldNumEntries)
753  NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
754  if (NewNumBuckets == NumBuckets) {
755  this->BaseT::initEmpty();
756  return;
757  }
758 
759  operator delete(Buckets);
760  init(NewNumBuckets);
761  }
762 
763 private:
764  unsigned getNumEntries() const {
765  return NumEntries;
766  }
767 
768  void setNumEntries(unsigned Num) {
769  NumEntries = Num;
770  }
771 
772  unsigned getNumTombstones() const {
773  return NumTombstones;
774  }
775 
776  void setNumTombstones(unsigned Num) {
777  NumTombstones = Num;
778  }
779 
780  BucketT *getBuckets() const {
781  return Buckets;
782  }
783 
784  unsigned getNumBuckets() const {
785  return NumBuckets;
786  }
787 
788  bool allocateBuckets(unsigned Num) {
789  NumBuckets = Num;
790  if (NumBuckets == 0) {
791  Buckets = nullptr;
792  return false;
793  }
794 
795  Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
796  return true;
797  }
798 };
799 
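// [Editor's note: illustrative usage sketch, not part of the original header.]
// A minimal self-contained program using DenseMap; any key type with a
// DenseMapInfo specialization (integers, pointers, StringRef, std::pair, ...)
// can be used.
//
//   #include "llvm/ADT/DenseMap.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   int main() {
//     llvm::DenseMap<int, const char *> Names;
//     Names.try_emplace(1, "one");
//     Names[2] = "two";
//     for (const auto &KV : Names)
//       llvm::errs() << KV.first << " = " << KV.second << "\n";
//     return Names.count(3);   // 0: key 3 was never inserted
//   }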
800 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
801  typename KeyInfoT = DenseMapInfo<KeyT>,
802  typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
803 class SmallDenseMap
804  : public DenseMapBase<
805  SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
806  ValueT, KeyInfoT, BucketT> {
807  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
808 
809  // Lift some types from the dependent base class into this class for
810  // simplicity of referring to them.
811  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
812 
813  static_assert(isPowerOf2_64(InlineBuckets),
814  "InlineBuckets must be a power of 2.");
815 
816  unsigned Small : 1;
817  unsigned NumEntries : 31;
818  unsigned NumTombstones;
819 
820  struct LargeRep {
821  BucketT *Buckets;
822  unsigned NumBuckets;
823  };
824 
825  /// A "union" of an inline bucket array and the struct representing
826  /// a large bucket. This union will be discriminated by the 'Small' bit.
827  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
828 
829 public:
830  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
831  init(NumInitBuckets);
832  }
833 
834  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
835  init(0);
836  copyFrom(other);
837  }
838 
839  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
840  init(0);
841  swap(other);
842  }
843 
844  template<typename InputIt>
845  SmallDenseMap(const InputIt &I, const InputIt &E) {
846  init(NextPowerOf2(std::distance(I, E)));
847  this->insert(I, E);
848  }
849 
850  ~SmallDenseMap() {
851  this->destroyAll();
852  deallocateBuckets();
853  }
854 
855  void swap(SmallDenseMap& RHS) {
856  unsigned TmpNumEntries = RHS.NumEntries;
857  RHS.NumEntries = NumEntries;
858  NumEntries = TmpNumEntries;
859  std::swap(NumTombstones, RHS.NumTombstones);
860 
861  const KeyT EmptyKey = this->getEmptyKey();
862  const KeyT TombstoneKey = this->getTombstoneKey();
863  if (Small && RHS.Small) {
864  // If we're swapping inline bucket arrays, we have to cope with some of
865  // the tricky bits of DenseMap's storage system: the buckets are not
866  // fully initialized. Thus we swap every key, but we may have
867  // a one-directional move of the value.
868  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
869  BucketT *LHSB = &getInlineBuckets()[i],
870  *RHSB = &RHS.getInlineBuckets()[i];
871  bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
872  !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
873  bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
874  !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
875  if (hasLHSValue && hasRHSValue) {
876  // Swap together if we can...
877  std::swap(*LHSB, *RHSB);
878  continue;
879  }
880  // Swap separately and handle any asymmetry.
881  std::swap(LHSB->getFirst(), RHSB->getFirst());
882  if (hasLHSValue) {
883  ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
884  LHSB->getSecond().~ValueT();
885  } else if (hasRHSValue) {
886  ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
887  RHSB->getSecond().~ValueT();
888  }
889  }
890  return;
891  }
892  if (!Small && !RHS.Small) {
893  std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
894  std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
895  return;
896  }
897 
898  SmallDenseMap &SmallSide = Small ? *this : RHS;
899  SmallDenseMap &LargeSide = Small ? RHS : *this;
900 
901  // First stash the large side's rep and move the small side across.
902  LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
903  LargeSide.getLargeRep()->~LargeRep();
904  LargeSide.Small = true;
905  // This is similar to the standard move-from-old-buckets, but the bucket
906  // count hasn't actually rotated in this case. So we have to carefully
907  // move construct the keys and values into their new locations, but there
908  // is no need to re-hash things.
909  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
910  BucketT *NewB = &LargeSide.getInlineBuckets()[i],
911  *OldB = &SmallSide.getInlineBuckets()[i];
912  ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
913  OldB->getFirst().~KeyT();
914  if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
915  !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
916  ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
917  OldB->getSecond().~ValueT();
918  }
919  }
920 
921  // The hard part of moving the small buckets across is done, just move
922  // the TmpRep into its new home.
923  SmallSide.Small = false;
924  new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
925  }
926 
927  SmallDenseMap& operator=(const SmallDenseMap& other) {
928  if (&other != this)
929  copyFrom(other);
930  return *this;
931  }
932 
933  SmallDenseMap& operator=(SmallDenseMap &&other) {
934  this->destroyAll();
935  deallocateBuckets();
936  init(0);
937  swap(other);
938  return *this;
939  }
940 
941  void copyFrom(const SmallDenseMap& other) {
942  this->destroyAll();
943  deallocateBuckets();
944  Small = true;
945  if (other.getNumBuckets() > InlineBuckets) {
946  Small = false;
947  new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
948  }
949  this->BaseT::copyFrom(other);
950  }
951 
952  void init(unsigned InitBuckets) {
953  Small = true;
954  if (InitBuckets > InlineBuckets) {
955  Small = false;
956  new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
957  }
958  this->BaseT::initEmpty();
959  }
960 
961  void grow(unsigned AtLeast) {
962  if (AtLeast >= InlineBuckets)
963  AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
964 
965  if (Small) {
966  if (AtLeast < InlineBuckets)
967  return; // Nothing to do.
968 
969  // First move the inline buckets into a temporary storage.
970  AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
971  BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
972  BucketT *TmpEnd = TmpBegin;
973 
974  // Loop over the buckets, moving non-empty, non-tombstones into the
975  // temporary storage. Have the loop move the TmpEnd forward as it goes.
976  const KeyT EmptyKey = this->getEmptyKey();
977  const KeyT TombstoneKey = this->getTombstoneKey();
978  for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
979  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
980  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
981  assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
982  "Too many inline buckets!");
983  ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
984  ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
985  ++TmpEnd;
986  P->getSecond().~ValueT();
987  }
988  P->getFirst().~KeyT();
989  }
990 
991  // Now make this map use the large rep, and move all the entries back
992  // into it.
993  Small = false;
994  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
995  this->moveFromOldBuckets(TmpBegin, TmpEnd);
996  return;
997  }
998 
999  LargeRep OldRep = std::move(*getLargeRep());
1000  getLargeRep()->~LargeRep();
1001  if (AtLeast <= InlineBuckets) {
1002  Small = true;
1003  } else {
1004  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1005  }
1006 
1007  this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1008 
1009  // Free the old table.
1010  operator delete(OldRep.Buckets);
1011  }
1012 
1013  void shrink_and_clear() {
1014  unsigned OldSize = this->size();
1015  this->destroyAll();
1016 
1017  // Reduce the number of buckets.
1018  unsigned NewNumBuckets = 0;
1019  if (OldSize) {
1020  NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1021  if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1022  NewNumBuckets = 64;
1023  }
1024  if ((Small && NewNumBuckets <= InlineBuckets) ||
1025  (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1026  this->BaseT::initEmpty();
1027  return;
1028  }
1029 
1030  deallocateBuckets();
1031  init(NewNumBuckets);
1032  }
1033 
1034 private:
1035  unsigned getNumEntries() const {
1036  return NumEntries;
1037  }
1038 
1039  void setNumEntries(unsigned Num) {
1040  // NumEntries is hardcoded to be 31 bits wide.
1041  assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1042  NumEntries = Num;
1043  }
1044 
1045  unsigned getNumTombstones() const {
1046  return NumTombstones;
1047  }
1048 
1049  void setNumTombstones(unsigned Num) {
1050  NumTombstones = Num;
1051  }
1052 
1053  const BucketT *getInlineBuckets() const {
1054  assert(Small);
1055  // Note that this cast does not violate aliasing rules as we assert that
1056  // the memory's dynamic type is the small, inline bucket buffer, and the
1057  // 'storage.buffer' static type is 'char *'.
1058  return reinterpret_cast<const BucketT *>(storage.buffer);
1059  }
1060 
1061  BucketT *getInlineBuckets() {
1062  return const_cast<BucketT *>(
1063  const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1064  }
1065 
1066  const LargeRep *getLargeRep() const {
1067  assert(!Small);
1068  // Note, same rule about aliasing as with getInlineBuckets.
1069  return reinterpret_cast<const LargeRep *>(storage.buffer);
1070  }
1071 
1072  LargeRep *getLargeRep() {
1073  return const_cast<LargeRep *>(
1074  const_cast<const SmallDenseMap *>(this)->getLargeRep());
1075  }
1076 
1077  const BucketT *getBuckets() const {
1078  return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1079  }
1080 
1081  BucketT *getBuckets() {
1082  return const_cast<BucketT *>(
1083  const_cast<const SmallDenseMap *>(this)->getBuckets());
1084  }
1085 
1086  unsigned getNumBuckets() const {
1087  return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1088  }
1089 
1090  void deallocateBuckets() {
1091  if (Small)
1092  return;
1093 
1094  operator delete(getLargeRep()->Buckets);
1095  getLargeRep()->~LargeRep();
1096  }
1097 
1098  LargeRep allocateBuckets(unsigned Num) {
1099  assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1100  LargeRep Rep = {
1101  static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
1102  };
1103  return Rep;
1104  }
1105 };
1106 
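// [Editor's note: illustrative usage sketch, not part of the original header.]
// SmallDenseMap keeps up to InlineBuckets buckets inside the object itself, so
// small maps never touch the heap; once the table has to grow past that, it
// switches to a heap-allocated bucket array like an ordinary DenseMap.
//
//   llvm::SmallDenseMap<int, int, 8> M;   // 8 inline buckets
//   M[1] = 1;
//   M[2] = 4;                             // still in inline storage
//   for (int i = 3; i <= 32; ++i)
//     M[i] = i * i;                       // forces the switch to the large rep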
1107 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1108  bool IsConst>
1109 class DenseMapIterator : DebugEpochBase::HandleBase {
1110  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1111  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1112 
1113  using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1114 
1115 public:
1116  using difference_type = ptrdiff_t;
1117  using value_type =
1118  typename std::conditional<IsConst, const Bucket, Bucket>::type;
1119  using pointer = value_type *;
1120  using reference = value_type &;
1121  using iterator_category = std::forward_iterator_tag;
1122 
1123 private:
1124  pointer Ptr = nullptr;
1125  pointer End = nullptr;
1126 
1127 public:
1128  DenseMapIterator() = default;
1129 
1130  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1131  bool NoAdvance = false)
1132  : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1133  assert(isHandleInSync() && "invalid construction!");
1134 
1135  if (NoAdvance) return;
1136  if (shouldReverseIterate<KeyT>()) {
1137  RetreatPastEmptyBuckets();
1138  return;
1139  }
1140  AdvancePastEmptyBuckets();
1141  }
1142 
1143  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1144  // for const iterator destinations so it doesn't end up as a user defined copy
1145  // constructor.
1146  template <bool IsConstSrc,
1147  typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
1148  DenseMapIterator(
1149      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1150  : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1151 
1152  reference operator*() const {
1153  assert(isHandleInSync() && "invalid iterator access!");
1154  if (shouldReverseIterate<KeyT>())
1155  return Ptr[-1];
1156  return *Ptr;
1157  }
1158  pointer operator->() const {
1159  assert(isHandleInSync() && "invalid iterator access!");
1160  if (shouldReverseIterate<KeyT>())
1161  return &(Ptr[-1]);
1162  return Ptr;
1163  }
1164 
1165  bool operator==(const ConstIterator &RHS) const {
1166  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1167  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1168  assert(getEpochAddress() == RHS.getEpochAddress() &&
1169  "comparing incomparable iterators!");
1170  return Ptr == RHS.Ptr;
1171  }
1172  bool operator!=(const ConstIterator &RHS) const {
1173  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1174  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1175  assert(getEpochAddress() == RHS.getEpochAddress() &&
1176  "comparing incomparable iterators!");
1177  return Ptr != RHS.Ptr;
1178  }
1179 
1180  inline DenseMapIterator& operator++() { // Preincrement
1181  assert(isHandleInSync() && "invalid iterator access!");
1182  if (shouldReverseIterate<KeyT>()) {
1183  --Ptr;
1184  RetreatPastEmptyBuckets();
1185  return *this;
1186  }
1187  ++Ptr;
1188  AdvancePastEmptyBuckets();
1189  return *this;
1190  }
1191  DenseMapIterator operator++(int) { // Postincrement
1192  assert(isHandleInSync() && "invalid iterator access!");
1193  DenseMapIterator tmp = *this; ++*this; return tmp;
1194  }
1195 
1196 private:
1197  void AdvancePastEmptyBuckets() {
1198  assert(Ptr <= End);
1199  const KeyT Empty = KeyInfoT::getEmptyKey();
1200  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1201 
1202  while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1203  KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1204  ++Ptr;
1205  }
1206 
1207  void RetreatPastEmptyBuckets() {
1208  assert(Ptr >= End);
1209  const KeyT Empty = KeyInfoT::getEmptyKey();
1210  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1211 
1212  while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1213  KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1214  --Ptr;
1215  }
1216 };
1217 
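// [Editor's note: illustrative sketch, not part of the original header.]
// DenseMapIterator registers itself with the map's DebugEpochBase. Every
// insertion, clear() and reserve() bumps the map's epoch (whether or not a
// reallocation actually happens), and builds with LLVM_ENABLE_ABI_BREAKING_CHECKS
// will then assert on any use of a stale iterator.
//
//   llvm::DenseMap<int, int> M;
//   M[1] = 1;
//   auto It = M.find(1);
//   M[2] = 2;         // bumps the epoch; It is now considered invalid
//   // *It;           // "invalid iterator access!" assert in checked builds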
1218 template <typename KeyT, typename ValueT, typename KeyInfoT>
1219 inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1220  return X.getMemorySize();
1221 }
1222 
1223 } // end namespace llvm
1224 
1225 #endif // LLVM_ADT_DENSEMAP_H