LLVM  15.0.0git
DenseMap.h
Go to the documentation of this file.
1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file defines the DenseMap class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>
34 
35 namespace llvm {
36 
37 namespace detail {
38 
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  // Inherit all of std::pair's constructors.
  using std::pair<KeyT, ValueT>::pair;

  // Named accessors so DenseMap code can work against any bucket type that
  // exposes getFirst()/getSecond(), not just std::pair.
  KeyT &getFirst() { return this->first; }
  const KeyT &getFirst() const { return this->first; }
  ValueT &getSecond() { return this->second; }
  const ValueT &getSecond() const { return this->second; }
};
50 
51 } // end namespace detail
52 
53 template <typename KeyT, typename ValueT,
54  typename KeyInfoT = DenseMapInfo<KeyT>,
56  bool IsConst = false>
58 
59 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60  typename BucketT>
61 class DenseMapBase : public DebugEpochBase {
62  template <typename T>
63  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64 
65 public:
66  using size_type = unsigned;
67  using key_type = KeyT;
69  using value_type = BucketT;
70 
72  using const_iterator =
74 
75  inline iterator begin() {
76  // When the map is empty, avoid the overhead of advancing/retreating past
77  // empty buckets.
78  if (empty())
79  return end();
80  if (shouldReverseIterate<KeyT>())
81  return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82  return makeIterator(getBuckets(), getBucketsEnd(), *this);
83  }
84  inline iterator end() {
85  return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86  }
87  inline const_iterator begin() const {
88  if (empty())
89  return end();
90  if (shouldReverseIterate<KeyT>())
91  return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92  return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93  }
94  inline const_iterator end() const {
95  return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96  }
97 
98  LLVM_NODISCARD bool empty() const {
99  return getNumEntries() == 0;
100  }
101  unsigned size() const { return getNumEntries(); }
102 
103  /// Grow the densemap so that it can contain at least \p NumEntries items
104  /// before resizing again.
105  void reserve(size_type NumEntries) {
106  auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
107  incrementEpoch();
108  if (NumBuckets > getNumBuckets())
109  grow(NumBuckets);
110  }
111 
112  void clear() {
113  incrementEpoch();
114  if (getNumEntries() == 0 && getNumTombstones() == 0) return;
115 
116  // If the capacity of the array is huge, and the # elements used is small,
117  // shrink the array.
118  if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
119  shrink_and_clear();
120  return;
121  }
122 
123  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
124  if (std::is_trivially_destructible<ValueT>::value) {
125  // Use a simpler loop when values don't need destruction.
126  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
127  P->getFirst() = EmptyKey;
128  } else {
129  unsigned NumEntries = getNumEntries();
130  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
131  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
132  if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
133  P->getSecond().~ValueT();
134  --NumEntries;
135  }
136  P->getFirst() = EmptyKey;
137  }
138  }
139  assert(NumEntries == 0 && "Node count imbalance!");
140  (void)NumEntries;
141  }
142  setNumEntries(0);
143  setNumTombstones(0);
144  }
145 
146  /// Return 1 if the specified key is in the map, 0 otherwise.
147  size_type count(const_arg_type_t<KeyT> Val) const {
148  const BucketT *TheBucket;
149  return LookupBucketFor(Val, TheBucket) ? 1 : 0;
150  }
151 
152  iterator find(const_arg_type_t<KeyT> Val) {
153  BucketT *TheBucket;
154  if (LookupBucketFor(Val, TheBucket))
155  return makeIterator(TheBucket,
156  shouldReverseIterate<KeyT>() ? getBuckets()
157  : getBucketsEnd(),
158  *this, true);
159  return end();
160  }
161  const_iterator find(const_arg_type_t<KeyT> Val) const {
162  const BucketT *TheBucket;
163  if (LookupBucketFor(Val, TheBucket))
164  return makeConstIterator(TheBucket,
165  shouldReverseIterate<KeyT>() ? getBuckets()
166  : getBucketsEnd(),
167  *this, true);
168  return end();
169  }
170 
171  /// Alternate version of find() which allows a different, and possibly
172  /// less expensive, key type.
173  /// The DenseMapInfo is responsible for supplying methods
174  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
175  /// type used.
176  template<class LookupKeyT>
177  iterator find_as(const LookupKeyT &Val) {
178  BucketT *TheBucket;
179  if (LookupBucketFor(Val, TheBucket))
180  return makeIterator(TheBucket,
181  shouldReverseIterate<KeyT>() ? getBuckets()
182  : getBucketsEnd(),
183  *this, true);
184  return end();
185  }
186  template<class LookupKeyT>
187  const_iterator find_as(const LookupKeyT &Val) const {
188  const BucketT *TheBucket;
189  if (LookupBucketFor(Val, TheBucket))
190  return makeConstIterator(TheBucket,
191  shouldReverseIterate<KeyT>() ? getBuckets()
192  : getBucketsEnd(),
193  *this, true);
194  return end();
195  }
196 
197  /// lookup - Return the entry for the specified key, or a default
198  /// constructed value if no such entry exists.
199  ValueT lookup(const_arg_type_t<KeyT> Val) const {
200  const BucketT *TheBucket;
201  if (LookupBucketFor(Val, TheBucket))
202  return TheBucket->getSecond();
203  return ValueT();
204  }
205 
206  // Inserts key,value pair into the map if the key isn't already in the map.
207  // If the key is already in the map, it returns false and doesn't update the
208  // value.
209  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
210  return try_emplace(KV.first, KV.second);
211  }
212 
213  // Inserts key,value pair into the map if the key isn't already in the map.
214  // If the key is already in the map, it returns false and doesn't update the
215  // value.
216  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
217  return try_emplace(std::move(KV.first), std::move(KV.second));
218  }
219 
220  // Inserts key,value pair into the map if the key isn't already in the map.
221  // The value is constructed in-place if the key is not in the map, otherwise
222  // it is not moved.
223  template <typename... Ts>
224  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
225  BucketT *TheBucket;
226  if (LookupBucketFor(Key, TheBucket))
227  return std::make_pair(makeIterator(TheBucket,
228  shouldReverseIterate<KeyT>()
229  ? getBuckets()
230  : getBucketsEnd(),
231  *this, true),
232  false); // Already in map.
233 
234  // Otherwise, insert the new element.
235  TheBucket =
236  InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
237  return std::make_pair(makeIterator(TheBucket,
238  shouldReverseIterate<KeyT>()
239  ? getBuckets()
240  : getBucketsEnd(),
241  *this, true),
242  true);
243  }
244 
245  // Inserts key,value pair into the map if the key isn't already in the map.
246  // The value is constructed in-place if the key is not in the map, otherwise
247  // it is not moved.
248  template <typename... Ts>
249  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
250  BucketT *TheBucket;
251  if (LookupBucketFor(Key, TheBucket))
252  return std::make_pair(makeIterator(TheBucket,
253  shouldReverseIterate<KeyT>()
254  ? getBuckets()
255  : getBucketsEnd(),
256  *this, true),
257  false); // Already in map.
258 
259  // Otherwise, insert the new element.
260  TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
261  return std::make_pair(makeIterator(TheBucket,
262  shouldReverseIterate<KeyT>()
263  ? getBuckets()
264  : getBucketsEnd(),
265  *this, true),
266  true);
267  }
268 
269  /// Alternate version of insert() which allows a different, and possibly
270  /// less expensive, key type.
271  /// The DenseMapInfo is responsible for supplying methods
272  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
273  /// type used.
274  template <typename LookupKeyT>
275  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
276  const LookupKeyT &Val) {
277  BucketT *TheBucket;
278  if (LookupBucketFor(Val, TheBucket))
279  return std::make_pair(makeIterator(TheBucket,
280  shouldReverseIterate<KeyT>()
281  ? getBuckets()
282  : getBucketsEnd(),
283  *this, true),
284  false); // Already in map.
285 
286  // Otherwise, insert the new element.
287  TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
288  std::move(KV.second), Val);
289  return std::make_pair(makeIterator(TheBucket,
290  shouldReverseIterate<KeyT>()
291  ? getBuckets()
292  : getBucketsEnd(),
293  *this, true),
294  true);
295  }
296 
297  /// insert - Range insertion of pairs.
298  template<typename InputIt>
299  void insert(InputIt I, InputIt E) {
300  for (; I != E; ++I)
301  insert(*I);
302  }
303 
304  bool erase(const KeyT &Val) {
305  BucketT *TheBucket;
306  if (!LookupBucketFor(Val, TheBucket))
307  return false; // not in map.
308 
309  TheBucket->getSecond().~ValueT();
310  TheBucket->getFirst() = getTombstoneKey();
311  decrementNumEntries();
312  incrementNumTombstones();
313  return true;
314  }
315  void erase(iterator I) {
316  BucketT *TheBucket = &*I;
317  TheBucket->getSecond().~ValueT();
318  TheBucket->getFirst() = getTombstoneKey();
319  decrementNumEntries();
320  incrementNumTombstones();
321  }
322 
324  BucketT *TheBucket;
325  if (LookupBucketFor(Key, TheBucket))
326  return *TheBucket;
327 
328  return *InsertIntoBucket(TheBucket, Key);
329  }
330 
332  return FindAndConstruct(Key).second;
333  }
334 
336  BucketT *TheBucket;
337  if (LookupBucketFor(Key, TheBucket))
338  return *TheBucket;
339 
340  return *InsertIntoBucket(TheBucket, std::move(Key));
341  }
342 
344  return FindAndConstruct(std::move(Key)).second;
345  }
346 
347  /// isPointerIntoBucketsArray - Return true if the specified pointer points
348  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
349  /// value in the DenseMap).
350  bool isPointerIntoBucketsArray(const void *Ptr) const {
351  return Ptr >= getBuckets() && Ptr < getBucketsEnd();
352  }
353 
354  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
355  /// array. In conjunction with the previous method, this can be used to
356  /// determine whether an insertion caused the DenseMap to reallocate.
357  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
358 
359 protected:
360  DenseMapBase() = default;
361 
362  void destroyAll() {
363  if (getNumBuckets() == 0) // Nothing to do.
364  return;
365 
366  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
367  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
368  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
369  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
370  P->getSecond().~ValueT();
371  P->getFirst().~KeyT();
372  }
373  }
374 
375  void initEmpty() {
376  setNumEntries(0);
377  setNumTombstones(0);
378 
379  assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
380  "# initial buckets must be a power of two!");
381  const KeyT EmptyKey = getEmptyKey();
382  for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
383  ::new (&B->getFirst()) KeyT(EmptyKey);
384  }
385 
386  /// Returns the number of buckets to allocate to ensure that the DenseMap can
387  /// accommodate \p NumEntries without need to grow().
388  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
389  // Ensure that "NumEntries * 4 < NumBuckets * 3"
390  if (NumEntries == 0)
391  return 0;
392  // +1 is required because of the strict equality.
393  // For example if NumEntries is 48, we need to return 401.
394  return NextPowerOf2(NumEntries * 4 / 3 + 1);
395  }
396 
397  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
398  initEmpty();
399 
400  // Insert all the old elements.
401  const KeyT EmptyKey = getEmptyKey();
402  const KeyT TombstoneKey = getTombstoneKey();
403  for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
404  if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
405  !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
406  // Insert the key/value into the new table.
407  BucketT *DestBucket;
408  bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
409  (void)FoundVal; // silence warning.
410  assert(!FoundVal && "Key already in new map?");
411  DestBucket->getFirst() = std::move(B->getFirst());
412  ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
413  incrementNumEntries();
414 
415  // Free the value.
416  B->getSecond().~ValueT();
417  }
418  B->getFirst().~KeyT();
419  }
420  }
421 
422  template <typename OtherBaseT>
423  void copyFrom(
425  assert(&other != this);
426  assert(getNumBuckets() == other.getNumBuckets());
427 
428  setNumEntries(other.getNumEntries());
429  setNumTombstones(other.getNumTombstones());
430 
431  if (std::is_trivially_copyable<KeyT>::value &&
432  std::is_trivially_copyable<ValueT>::value)
433  memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
434  getNumBuckets() * sizeof(BucketT));
435  else
436  for (size_t i = 0; i < getNumBuckets(); ++i) {
437  ::new (&getBuckets()[i].getFirst())
438  KeyT(other.getBuckets()[i].getFirst());
439  if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
440  !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
441  ::new (&getBuckets()[i].getSecond())
442  ValueT(other.getBuckets()[i].getSecond());
443  }
444  }
445 
446  static unsigned getHashValue(const KeyT &Val) {
447  return KeyInfoT::getHashValue(Val);
448  }
449 
450  template<typename LookupKeyT>
451  static unsigned getHashValue(const LookupKeyT &Val) {
452  return KeyInfoT::getHashValue(Val);
453  }
454 
455  static const KeyT getEmptyKey() {
456  static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
457  "Must pass the derived type to this template!");
458  return KeyInfoT::getEmptyKey();
459  }
460 
461  static const KeyT getTombstoneKey() {
462  return KeyInfoT::getTombstoneKey();
463  }
464 
465 private:
466  iterator makeIterator(BucketT *P, BucketT *E,
467  DebugEpochBase &Epoch,
468  bool NoAdvance=false) {
469  if (shouldReverseIterate<KeyT>()) {
470  BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
471  return iterator(B, E, Epoch, NoAdvance);
472  }
473  return iterator(P, E, Epoch, NoAdvance);
474  }
475 
476  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
477  const DebugEpochBase &Epoch,
478  const bool NoAdvance=false) const {
479  if (shouldReverseIterate<KeyT>()) {
480  const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
481  return const_iterator(B, E, Epoch, NoAdvance);
482  }
483  return const_iterator(P, E, Epoch, NoAdvance);
484  }
485 
486  unsigned getNumEntries() const {
487  return static_cast<const DerivedT *>(this)->getNumEntries();
488  }
489 
490  void setNumEntries(unsigned Num) {
491  static_cast<DerivedT *>(this)->setNumEntries(Num);
492  }
493 
494  void incrementNumEntries() {
495  setNumEntries(getNumEntries() + 1);
496  }
497 
498  void decrementNumEntries() {
499  setNumEntries(getNumEntries() - 1);
500  }
501 
502  unsigned getNumTombstones() const {
503  return static_cast<const DerivedT *>(this)->getNumTombstones();
504  }
505 
506  void setNumTombstones(unsigned Num) {
507  static_cast<DerivedT *>(this)->setNumTombstones(Num);
508  }
509 
510  void incrementNumTombstones() {
511  setNumTombstones(getNumTombstones() + 1);
512  }
513 
514  void decrementNumTombstones() {
515  setNumTombstones(getNumTombstones() - 1);
516  }
517 
518  const BucketT *getBuckets() const {
519  return static_cast<const DerivedT *>(this)->getBuckets();
520  }
521 
522  BucketT *getBuckets() {
523  return static_cast<DerivedT *>(this)->getBuckets();
524  }
525 
526  unsigned getNumBuckets() const {
527  return static_cast<const DerivedT *>(this)->getNumBuckets();
528  }
529 
530  BucketT *getBucketsEnd() {
531  return getBuckets() + getNumBuckets();
532  }
533 
534  const BucketT *getBucketsEnd() const {
535  return getBuckets() + getNumBuckets();
536  }
537 
538  void grow(unsigned AtLeast) {
539  static_cast<DerivedT *>(this)->grow(AtLeast);
540  }
541 
542  void shrink_and_clear() {
543  static_cast<DerivedT *>(this)->shrink_and_clear();
544  }
545 
546  template <typename KeyArg, typename... ValueArgs>
547  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
548  ValueArgs &&... Values) {
549  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
550 
551  TheBucket->getFirst() = std::forward<KeyArg>(Key);
552  ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
553  return TheBucket;
554  }
555 
556  template <typename LookupKeyT>
557  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
558  ValueT &&Value, LookupKeyT &Lookup) {
559  TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
560 
561  TheBucket->getFirst() = std::move(Key);
562  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
563  return TheBucket;
564  }
565 
566  template <typename LookupKeyT>
567  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
568  BucketT *TheBucket) {
569  incrementEpoch();
570 
571  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
572  // the buckets are empty (meaning that many are filled with tombstones),
573  // grow the table.
574  //
575  // The later case is tricky. For example, if we had one empty bucket with
576  // tons of tombstones, failing lookups (e.g. for insertion) would have to
577  // probe almost the entire table until it found the empty bucket. If the
578  // table completely filled with tombstones, no lookup would ever succeed,
579  // causing infinite loops in lookup.
580  unsigned NewNumEntries = getNumEntries() + 1;
581  unsigned NumBuckets = getNumBuckets();
582  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
583  this->grow(NumBuckets * 2);
584  LookupBucketFor(Lookup, TheBucket);
585  NumBuckets = getNumBuckets();
586  } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
587  NumBuckets/8)) {
588  this->grow(NumBuckets);
589  LookupBucketFor(Lookup, TheBucket);
590  }
591  assert(TheBucket);
592 
593  // Only update the state after we've grown our bucket space appropriately
594  // so that when growing buckets we have self-consistent entry count.
595  incrementNumEntries();
596 
597  // If we are writing over a tombstone, remember this.
598  const KeyT EmptyKey = getEmptyKey();
599  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
600  decrementNumTombstones();
601 
602  return TheBucket;
603  }
604 
605  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
606  /// FoundBucket. If the bucket contains the key and a value, this returns
607  /// true, otherwise it returns a bucket with an empty marker or tombstone and
608  /// returns false.
609  template<typename LookupKeyT>
610  bool LookupBucketFor(const LookupKeyT &Val,
611  const BucketT *&FoundBucket) const {
612  const BucketT *BucketsPtr = getBuckets();
613  const unsigned NumBuckets = getNumBuckets();
614 
615  if (NumBuckets == 0) {
616  FoundBucket = nullptr;
617  return false;
618  }
619 
620  // FoundTombstone - Keep track of whether we find a tombstone while probing.
621  const BucketT *FoundTombstone = nullptr;
622  const KeyT EmptyKey = getEmptyKey();
623  const KeyT TombstoneKey = getTombstoneKey();
624  assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
625  !KeyInfoT::isEqual(Val, TombstoneKey) &&
626  "Empty/Tombstone value shouldn't be inserted into map!");
627 
628  unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
629  unsigned ProbeAmt = 1;
630  while (true) {
631  const BucketT *ThisBucket = BucketsPtr + BucketNo;
632  // Found Val's bucket? If so, return it.
633  if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
634  FoundBucket = ThisBucket;
635  return true;
636  }
637 
638  // If we found an empty bucket, the key doesn't exist in the set.
639  // Insert it and return the default value.
640  if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
641  // If we've already seen a tombstone while probing, fill it in instead
642  // of the empty bucket we eventually probed to.
643  FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
644  return false;
645  }
646 
647  // If this is a tombstone, remember it. If Val ends up not in the map, we
648  // prefer to return it than something that would require more probing.
649  if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
650  !FoundTombstone)
651  FoundTombstone = ThisBucket; // Remember the first tombstone found.
652 
653  // Otherwise, it's a hash collision or a tombstone, continue quadratic
654  // probing.
655  BucketNo += ProbeAmt++;
656  BucketNo &= (NumBuckets-1);
657  }
658  }
659 
660  template <typename LookupKeyT>
661  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
662  const BucketT *ConstFoundBucket;
663  bool Result = const_cast<const DenseMapBase *>(this)
664  ->LookupBucketFor(Val, ConstFoundBucket);
665  FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
666  return Result;
667  }
668 
669 public:
670  /// Return the approximate size (in bytes) of the actual map.
671  /// This is just the raw memory used by DenseMap.
672  /// If entries are pointers to objects, the size of the referenced objects
673  /// are not included.
674  size_t getMemorySize() const {
675  return getNumBuckets() * sizeof(BucketT);
676  }
677 };
678 
679 /// Equality comparison for DenseMap.
680 ///
681 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
682 /// is also in RHS, and that no additional pairs are in RHS.
683 /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
684 /// complexity is linear, worst case is O(N^2) (if every hash collides).
685 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
686  typename BucketT>
690  if (LHS.size() != RHS.size())
691  return false;
692 
693  for (auto &KV : LHS) {
694  auto I = RHS.find(KV.first);
695  if (I == RHS.end() || I->second != KV.second)
696  return false;
697  }
698 
699  return true;
700 }
701 
702 /// Inequality comparison for DenseMap.
703 ///
704 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
705 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
706  typename BucketT>
710  return !(LHS == RHS);
711 }
712 
713 template <typename KeyT, typename ValueT,
714  typename KeyInfoT = DenseMapInfo<KeyT>,
715  typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
716 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
717  KeyT, ValueT, KeyInfoT, BucketT> {
718  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
719 
720  // Lift some types from the dependent base class into this class for
721  // simplicity of referring to them.
723 
724  BucketT *Buckets;
725  unsigned NumEntries;
726  unsigned NumTombstones;
727  unsigned NumBuckets;
728 
729 public:
730  /// Create a DenseMap with an optional \p InitialReserve that guarantee that
731  /// this number of elements can be inserted in the map without grow()
732  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
733 
734  DenseMap(const DenseMap &other) : BaseT() {
735  init(0);
736  copyFrom(other);
737  }
738 
739  DenseMap(DenseMap &&other) : BaseT() {
740  init(0);
741  swap(other);
742  }
743 
744  template<typename InputIt>
745  DenseMap(const InputIt &I, const InputIt &E) {
746  init(std::distance(I, E));
747  this->insert(I, E);
748  }
749 
750  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
751  init(Vals.size());
752  this->insert(Vals.begin(), Vals.end());
753  }
754 
756  this->destroyAll();
757  deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
758  }
759 
760  void swap(DenseMap& RHS) {
761  this->incrementEpoch();
762  RHS.incrementEpoch();
763  std::swap(Buckets, RHS.Buckets);
764  std::swap(NumEntries, RHS.NumEntries);
765  std::swap(NumTombstones, RHS.NumTombstones);
766  std::swap(NumBuckets, RHS.NumBuckets);
767  }
768 
769  DenseMap& operator=(const DenseMap& other) {
770  if (&other != this)
771  copyFrom(other);
772  return *this;
773  }
774 
776  this->destroyAll();
777  deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
778  init(0);
779  swap(other);
780  return *this;
781  }
782 
783  void copyFrom(const DenseMap& other) {
784  this->destroyAll();
785  deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
786  if (allocateBuckets(other.NumBuckets)) {
787  this->BaseT::copyFrom(other);
788  } else {
789  NumEntries = 0;
790  NumTombstones = 0;
791  }
792  }
793 
794  void init(unsigned InitNumEntries) {
795  auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
796  if (allocateBuckets(InitBuckets)) {
797  this->BaseT::initEmpty();
798  } else {
799  NumEntries = 0;
800  NumTombstones = 0;
801  }
802  }
803 
804  void grow(unsigned AtLeast) {
805  unsigned OldNumBuckets = NumBuckets;
806  BucketT *OldBuckets = Buckets;
807 
808  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
809  assert(Buckets);
810  if (!OldBuckets) {
811  this->BaseT::initEmpty();
812  return;
813  }
814 
815  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
816 
817  // Free the old table.
818  deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
819  alignof(BucketT));
820  }
821 
823  unsigned OldNumBuckets = NumBuckets;
824  unsigned OldNumEntries = NumEntries;
825  this->destroyAll();
826 
827  // Reduce the number of buckets.
828  unsigned NewNumBuckets = 0;
829  if (OldNumEntries)
830  NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
831  if (NewNumBuckets == NumBuckets) {
832  this->BaseT::initEmpty();
833  return;
834  }
835 
836  deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
837  alignof(BucketT));
838  init(NewNumBuckets);
839  }
840 
841 private:
842  unsigned getNumEntries() const {
843  return NumEntries;
844  }
845 
846  void setNumEntries(unsigned Num) {
847  NumEntries = Num;
848  }
849 
850  unsigned getNumTombstones() const {
851  return NumTombstones;
852  }
853 
854  void setNumTombstones(unsigned Num) {
855  NumTombstones = Num;
856  }
857 
858  BucketT *getBuckets() const {
859  return Buckets;
860  }
861 
862  unsigned getNumBuckets() const {
863  return NumBuckets;
864  }
865 
866  bool allocateBuckets(unsigned Num) {
867  NumBuckets = Num;
868  if (NumBuckets == 0) {
869  Buckets = nullptr;
870  return false;
871  }
872 
873  Buckets = static_cast<BucketT *>(
874  allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
875  return true;
876  }
877 };
878 
879 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
880  typename KeyInfoT = DenseMapInfo<KeyT>,
881  typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
883  : public DenseMapBase<
884  SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
885  ValueT, KeyInfoT, BucketT> {
886  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
887 
888  // Lift some types from the dependent base class into this class for
889  // simplicity of referring to them.
891 
892  static_assert(isPowerOf2_64(InlineBuckets),
893  "InlineBuckets must be a power of 2.");
894 
895  unsigned Small : 1;
896  unsigned NumEntries : 31;
897  unsigned NumTombstones;
898 
899  struct LargeRep {
900  BucketT *Buckets;
901  unsigned NumBuckets;
902  };
903 
904  /// A "union" of an inline bucket array and the struct representing
905  /// a large bucket. This union will be discriminated by the 'Small' bit.
907 
908 public:
909  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
910  init(NumInitBuckets);
911  }
912 
913  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
914  init(0);
915  copyFrom(other);
916  }
917 
919  init(0);
920  swap(other);
921  }
922 
923  template<typename InputIt>
924  SmallDenseMap(const InputIt &I, const InputIt &E) {
925  init(NextPowerOf2(std::distance(I, E)));
926  this->insert(I, E);
927  }
928 
929  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
930  : SmallDenseMap(Vals.begin(), Vals.end()) {}
931 
933  this->destroyAll();
934  deallocateBuckets();
935  }
936 
938  unsigned TmpNumEntries = RHS.NumEntries;
939  RHS.NumEntries = NumEntries;
940  NumEntries = TmpNumEntries;
941  std::swap(NumTombstones, RHS.NumTombstones);
942 
943  const KeyT EmptyKey = this->getEmptyKey();
944  const KeyT TombstoneKey = this->getTombstoneKey();
945  if (Small && RHS.Small) {
946  // If we're swapping inline bucket arrays, we have to cope with some of
947  // the tricky bits of DenseMap's storage system: the buckets are not
948  // fully initialized. Thus we swap every key, but we may have
949  // a one-directional move of the value.
950  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
951  BucketT *LHSB = &getInlineBuckets()[i],
952  *RHSB = &RHS.getInlineBuckets()[i];
953  bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
954  !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
955  bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
956  !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
957  if (hasLHSValue && hasRHSValue) {
958  // Swap together if we can...
959  std::swap(*LHSB, *RHSB);
960  continue;
961  }
962  // Swap separately and handle any asymmetry.
963  std::swap(LHSB->getFirst(), RHSB->getFirst());
964  if (hasLHSValue) {
965  ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
966  LHSB->getSecond().~ValueT();
967  } else if (hasRHSValue) {
968  ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
969  RHSB->getSecond().~ValueT();
970  }
971  }
972  return;
973  }
974  if (!Small && !RHS.Small) {
975  std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
976  std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
977  return;
978  }
979 
980  SmallDenseMap &SmallSide = Small ? *this : RHS;
981  SmallDenseMap &LargeSide = Small ? RHS : *this;
982 
983  // First stash the large side's rep and move the small side across.
984  LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
985  LargeSide.getLargeRep()->~LargeRep();
986  LargeSide.Small = true;
987  // This is similar to the standard move-from-old-buckets, but the bucket
988  // count hasn't actually rotated in this case. So we have to carefully
989  // move construct the keys and values into their new locations, but there
990  // is no need to re-hash things.
991  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
992  BucketT *NewB = &LargeSide.getInlineBuckets()[i],
993  *OldB = &SmallSide.getInlineBuckets()[i];
994  ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
995  OldB->getFirst().~KeyT();
996  if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
997  !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
998  ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
999  OldB->getSecond().~ValueT();
1000  }
1001  }
1002 
1003  // The hard part of moving the small buckets across is done, just move
1004  // the TmpRep into its new home.
1005  SmallSide.Small = false;
1006  new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1007  }
1008 
1010  if (&other != this)
1011  copyFrom(other);
1012  return *this;
1013  }
1014 
1016  this->destroyAll();
1017  deallocateBuckets();
1018  init(0);
1019  swap(other);
1020  return *this;
1021  }
1022 
1023  void copyFrom(const SmallDenseMap& other) {
1024  this->destroyAll();
1025  deallocateBuckets();
1026  Small = true;
1027  if (other.getNumBuckets() > InlineBuckets) {
1028  Small = false;
1029  new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1030  }
1031  this->BaseT::copyFrom(other);
1032  }
1033 
1034  void init(unsigned InitBuckets) {
1035  Small = true;
1036  if (InitBuckets > InlineBuckets) {
1037  Small = false;
1038  new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1039  }
1040  this->BaseT::initEmpty();
1041  }
1042 
1043  void grow(unsigned AtLeast) {
1044  if (AtLeast > InlineBuckets)
1045  AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1046 
1047  if (Small) {
1048  // First move the inline buckets into a temporary storage.
1050  BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1051  BucketT *TmpEnd = TmpBegin;
1052 
1053  // Loop over the buckets, moving non-empty, non-tombstones into the
1054  // temporary storage. Have the loop move the TmpEnd forward as it goes.
1055  const KeyT EmptyKey = this->getEmptyKey();
1056  const KeyT TombstoneKey = this->getTombstoneKey();
1057  for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1058  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1059  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1060  assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1061  "Too many inline buckets!");
1062  ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1063  ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1064  ++TmpEnd;
1065  P->getSecond().~ValueT();
1066  }
1067  P->getFirst().~KeyT();
1068  }
1069 
1070  // AtLeast == InlineBuckets can happen if there are many tombstones,
1071  // and grow() is used to remove them. Usually we always switch to the
1072  // large rep here.
1073  if (AtLeast > InlineBuckets) {
1074  Small = false;
1075  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1076  }
1077  this->moveFromOldBuckets(TmpBegin, TmpEnd);
1078  return;
1079  }
1080 
1081  LargeRep OldRep = std::move(*getLargeRep());
1082  getLargeRep()->~LargeRep();
1083  if (AtLeast <= InlineBuckets) {
1084  Small = true;
1085  } else {
1086  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1087  }
1088 
1089  this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1090 
1091  // Free the old table.
1092  deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1093  alignof(BucketT));
1094  }
1095 
1097  unsigned OldSize = this->size();
1098  this->destroyAll();
1099 
1100  // Reduce the number of buckets.
1101  unsigned NewNumBuckets = 0;
1102  if (OldSize) {
1103  NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1104  if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1105  NewNumBuckets = 64;
1106  }
1107  if ((Small && NewNumBuckets <= InlineBuckets) ||
1108  (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1109  this->BaseT::initEmpty();
1110  return;
1111  }
1112 
1113  deallocateBuckets();
1114  init(NewNumBuckets);
1115  }
1116 
1117 private:
  /// Return the number of live (non-empty, non-tombstone) entries.
  unsigned getNumEntries() const {
    return NumEntries;
  }
1121 
  /// Record the number of live entries.
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }
1127 
  /// Return the number of tombstone buckets left behind by erasures.
  unsigned getNumTombstones() const {
    return NumTombstones;
  }
1131 
  /// Record the number of tombstone buckets.
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }
1135 
  /// Return the inline (in-object) bucket array; only valid in small mode.
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }
1143 
1144  BucketT *getInlineBuckets() {
1145  return const_cast<BucketT *>(
1146  const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1147  }
1148 
  /// Return the heap-table descriptor; only valid in large mode.
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }
1154 
1155  LargeRep *getLargeRep() {
1156  return const_cast<LargeRep *>(
1157  const_cast<const SmallDenseMap *>(this)->getLargeRep());
1158  }
1159 
1160  const BucketT *getBuckets() const {
1161  return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1162  }
1163 
1164  BucketT *getBuckets() {
1165  return const_cast<BucketT *>(
1166  const_cast<const SmallDenseMap *>(this)->getBuckets());
1167  }
1168 
1169  unsigned getNumBuckets() const {
1170  return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1171  }
1172 
1173  void deallocateBuckets() {
1174  if (Small)
1175  return;
1176 
1177  deallocate_buffer(getLargeRep()->Buckets,
1178  sizeof(BucketT) * getLargeRep()->NumBuckets,
1179  alignof(BucketT));
1180  getLargeRep()->~LargeRep();
1181  }
1182 
1183  LargeRep allocateBuckets(unsigned Num) {
1184  assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1185  LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1186  sizeof(BucketT) * Num, alignof(BucketT))),
1187  Num};
1188  return Rep;
1189  }
1190 };
1191 
1192 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1193  bool IsConst>
1194 class DenseMapIterator : DebugEpochBase::HandleBase {
1195  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1196  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1197 
1198 public:
1200  using value_type =
1202  using pointer = value_type *;
1204  using iterator_category = std::forward_iterator_tag;
1205 
1206 private:
1207  pointer Ptr = nullptr;
1208  pointer End = nullptr;
1209 
1210 public:
1211  DenseMapIterator() = default;
1212 
1214  bool NoAdvance = false)
1215  : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1216  assert(isHandleInSync() && "invalid construction!");
1217 
1218  if (NoAdvance) return;
1219  if (shouldReverseIterate<KeyT>()) {
1220  RetreatPastEmptyBuckets();
1221  return;
1222  }
1223  AdvancePastEmptyBuckets();
1224  }
1225 
1226  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1227  // for const iterator destinations so it doesn't end up as a user defined copy
1228  // constructor.
1229  template <bool IsConstSrc,
1230  typename = std::enable_if_t<!IsConstSrc && IsConst>>
1233  : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1234 
1236  assert(isHandleInSync() && "invalid iterator access!");
1237  assert(Ptr != End && "dereferencing end() iterator");
1238  if (shouldReverseIterate<KeyT>())
1239  return Ptr[-1];
1240  return *Ptr;
1241  }
1243  assert(isHandleInSync() && "invalid iterator access!");
1244  assert(Ptr != End && "dereferencing end() iterator");
1245  if (shouldReverseIterate<KeyT>())
1246  return &(Ptr[-1]);
1247  return Ptr;
1248  }
1249 
1250  friend bool operator==(const DenseMapIterator &LHS,
1251  const DenseMapIterator &RHS) {
1252  assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1253  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1254  assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1255  "comparing incomparable iterators!");
1256  return LHS.Ptr == RHS.Ptr;
1257  }
1258 
1259  friend bool operator!=(const DenseMapIterator &LHS,
1260  const DenseMapIterator &RHS) {
1261  return !(LHS == RHS);
1262  }
1263 
1264  inline DenseMapIterator& operator++() { // Preincrement
1265  assert(isHandleInSync() && "invalid iterator access!");
1266  assert(Ptr != End && "incrementing end() iterator");
1267  if (shouldReverseIterate<KeyT>()) {
1268  --Ptr;
1269  RetreatPastEmptyBuckets();
1270  return *this;
1271  }
1272  ++Ptr;
1273  AdvancePastEmptyBuckets();
1274  return *this;
1275  }
1276  DenseMapIterator operator++(int) { // Postincrement
1277  assert(isHandleInSync() && "invalid iterator access!");
1278  DenseMapIterator tmp = *this; ++*this; return tmp;
1279  }
1280 
1281 private:
1282  void AdvancePastEmptyBuckets() {
1283  assert(Ptr <= End);
1284  const KeyT Empty = KeyInfoT::getEmptyKey();
1285  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1286 
1287  while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1288  KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1289  ++Ptr;
1290  }
1291 
1292  void RetreatPastEmptyBuckets() {
1293  assert(Ptr >= End);
1294  const KeyT Empty = KeyInfoT::getEmptyKey();
1295  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1296 
1297  while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1298  KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1299  --Ptr;
1300  }
1301 };
1302 
1303 template <typename KeyT, typename ValueT, typename KeyInfoT>
1305  return X.getMemorySize();
1306 }
1307 
1308 } // end namespace llvm
1309 
1310 #endif // LLVM_ADT_DENSEMAP_H
i
i
Definition: README.txt:29
const_iterator
llvm::SmallDenseMap::init
void init(unsigned InitBuckets)
Definition: DenseMap.h:1034
MathExtras.h
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
KeyT
llvm::detail::DenseMapPair::getSecond
ValueT & getSecond()
Definition: DenseMap.h:47
llvm::DenseMapIterator::operator==
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1250
llvm::DenseMapBase::find_as
const_iterator find_as(const LookupKeyT &Val) const
Definition: DenseMap.h:187
llvm::DenseMap::DenseMap
DenseMap(DenseMap &&other)
Definition: DenseMap.h:739
llvm::DenseMapBase::insert
void insert(InputIt I, InputIt E)
insert - Range insertion of pairs.
Definition: DenseMap.h:299
ReverseIteration.h
return
return
Definition: README.txt:242
llvm::DenseMapBase::lookup
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:199
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&... Args)
Definition: DenseMap.h:249
llvm::SmallDenseMap::SmallDenseMap
SmallDenseMap(unsigned NumInitBuckets=0)
Definition: DenseMap.h:909
llvm::DenseMap::DenseMap
DenseMap(const DenseMap &other)
Definition: DenseMap.h:734
llvm::DenseMapBase::getHashValue
static unsigned getHashValue(const KeyT &Val)
Definition: DenseMap.h:446
llvm::detail::DenseMapPair::getFirst
KeyT & getFirst()
Definition: DenseMap.h:45
llvm::DenseMapBase::begin
const_iterator begin() const
Definition: DenseMap.h:87
llvm::DenseMapBase::copyFrom
void copyFrom(const DenseMapBase< OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT > &other)
Definition: DenseMap.h:423
llvm::allocate_buffer
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
Definition: MemAlloc.cpp:15
llvm::SmallDenseMap
Definition: DenseMap.h:882
llvm::DenseMap::DenseMap
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:750
llvm::SmallDenseMap::copyFrom
void copyFrom(const SmallDenseMap &other)
Definition: DenseMap.h:1023
llvm::DenseMapBase::operator[]
ValueT & operator[](KeyT &&Key)
Definition: DenseMap.h:343
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition: DenseMap.h:304
llvm::capacity_in_bytes
BitVector::size_type capacity_in_bytes(const BitVector &X)
Definition: BitVector.h:828
llvm::DenseMapBase::begin
iterator begin()
Definition: DenseMap.h:75
llvm::detail::DenseMapPair::getFirst
const KeyT & getFirst() const
Definition: DenseMap.h:46
true
basic Basic Alias true
Definition: BasicAliasAnalysis.cpp:1886
llvm::DenseMapIterator
Definition: DenseMap.h:57
llvm::deallocate_buffer
void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
Definition: MemAlloc.cpp:24
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition: DenseMap.h:216
llvm::operator!=
bool operator!=(uint64_t V1, const APInt &V2)
Definition: APInt.h:1992
getMinBucketToReserveForEntries
static unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition: StringMap.cpp:21
llvm::DenseMapBase::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:147
llvm::DenseMapBase::isPointerIntoBucketsArray
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition: DenseMap.h:350
llvm::DenseMapIterator::DenseMapIterator
DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, bool NoAdvance=false)
Definition: DenseMap.h:1213
llvm::DenseMapIterator::DenseMapIterator
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition: DenseMap.h:1231
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
AlignOf.h
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
tmp
alloca< 16 x float >, align 16 %tmp2=alloca< 16 x float >, align 16 store< 16 x float > %A,< 16 x float > *%tmp %s=bitcast< 16 x float > *%tmp to i8 *%s2=bitcast< 16 x float > *%tmp2 to i8 *call void @llvm.memcpy.i64(i8 *%s, i8 *%s2, i64 64, i32 16) %R=load< 16 x float > *%tmp2 ret< 16 x float > %R } declare void @llvm.memcpy.i64(i8 *nocapture, i8 *nocapture, i64, i32) nounwind which compiles to:_foo:subl $140, %esp movaps %xmm3, 112(%esp) movaps %xmm2, 96(%esp) movaps %xmm1, 80(%esp) movaps %xmm0, 64(%esp) movl 60(%esp), %eax movl %eax, 124(%esp) movl 56(%esp), %eax movl %eax, 120(%esp) movl 52(%esp), %eax< many many more 32-bit copies > movaps(%esp), %xmm0 movaps 16(%esp), %xmm1 movaps 32(%esp), %xmm2 movaps 48(%esp), %xmm3 addl $140, %esp ret On Nehalem, it may even be cheaper to just use movups when unaligned than to fall back to lower-granularity chunks. Implement processor-specific optimizations for parity with GCC on these processors. GCC does two optimizations:1. ix86_pad_returns inserts a noop before ret instructions if immediately preceded by a conditional branch or is the target of a jump. 2. ix86_avoid_jump_misspredicts inserts noops in cases where a 16-byte block of code contains more than 3 branches. The first one is done for all AMDs, Core2, and "Generic" The second one is done for:Atom, Pentium Pro, all AMDs, Pentium 4, Nocona, Core 2, and "Generic" Testcase:int x(int a) { return(a &0xf0)> >4 tmp
Definition: README.txt:1347
llvm::DenseMap::swap
void swap(DenseMap &RHS)
Definition: DenseMap.h:760
llvm::AlignedCharArrayUnion
A suitably aligned and sized character array member which can hold elements of any type.
Definition: AlignOf.h:27
llvm::InterleaveGroup
The group of interleaved loads/stores sharing the same stride and close to each other.
Definition: VectorUtils.h:284
llvm::detail::DenseMapPair
Definition: DenseMap.h:42
llvm::DenseMapIterator::iterator_category
std::forward_iterator_tag iterator_category
Definition: DenseMap.h:1204
llvm::DenseMap::operator=
DenseMap & operator=(const DenseMap &other)
Definition: DenseMap.h:769
new
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32 n y store obj * new
Definition: README.txt:125
llvm::DenseMapBase::const_iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition: DenseMap.h:73
LHS
Value * LHS
Definition: X86PartialReduction.cpp:75
llvm::DenseMapBase< DenseMap< llvm::VPInstruction *, llvm::InterleaveGroup< llvm::VPInstruction > *, DenseMapInfo< llvm::VPInstruction * >, llvm::detail::DenseMapPair< llvm::VPInstruction *, llvm::InterleaveGroup< llvm::VPInstruction > * > >, llvm::VPInstruction *, llvm::InterleaveGroup< llvm::VPInstruction > *, DenseMapInfo< llvm::VPInstruction * >, llvm::detail::DenseMapPair< llvm::VPInstruction *, llvm::InterleaveGroup< llvm::VPInstruction > * > >::size_type
unsigned size_type
Definition: DenseMap.h:66
llvm::DenseMapBase::getMinBucketToReserveForEntries
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition: DenseMap.h:388
llvm::DenseMapBase::DenseMapBase
DenseMapBase()=default
llvm::const_pointer_or_const_ref::type
const T & type
Definition: type_traits.h:64
llvm::VPInstruction
This is a concrete Recipe that models a single VPlan-level instruction.
Definition: VPlan.h:742
llvm::DenseMap::shrink_and_clear
void shrink_and_clear()
Definition: DenseMap.h:822
ptrdiff_t
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::AMDGPU::PALMD::Key
Key
PAL metadata keys.
Definition: AMDGPUMetadata.h:486
llvm::NextPowerOf2
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:710
false
Definition: StackSlotColoring.cpp:141
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::CodeModel::Small
@ Small
Definition: CodeGen.h:28
llvm::DenseMap::DenseMap
DenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:745
llvm::DenseMapBase::destroyAll
void destroyAll()
Definition: DenseMap.h:362
llvm::DenseMapBase::operator[]
ValueT & operator[](const KeyT &Key)
Definition: DenseMap.h:331
llvm::DenseMapBase::getMemorySize
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition: DenseMap.h:674
llvm::DenseMap::operator=
DenseMap & operator=(DenseMap &&other)
Definition: DenseMap.h:775
llvm::DenseMapIterator::value_type
typename std::conditional< IsConst, const Bucket, Bucket >::type value_type
Definition: DenseMap.h:1201
llvm::DenseMapBase
Definition: DenseMap.h:61
llvm::DenseMapBase::getPointerIntoBucketsArray
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition: DenseMap.h:357
llvm::DenseMap::~DenseMap
~DenseMap()
Definition: DenseMap.h:755
llvm::SmallDenseMap::shrink_and_clear
void shrink_and_clear()
Definition: DenseMap.h:1096
MemAlloc.h
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::Log2_32_Ceil
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:636
llvm::DenseMap::copyFrom
void copyFrom(const DenseMap &other)
Definition: DenseMap.h:783
llvm::SmallDenseMap::SmallDenseMap
SmallDenseMap(SmallDenseMap &&other)
Definition: DenseMap.h:918
llvm::DenseMapBase::find
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition: DenseMap.h:161
type
AMD64 Optimization Manual has some nice information about optimizing integer multiplication by a constant How much of it applies to Intel s X86 implementation There are definite trade offs to xmm0 cvttss2siq rdx jb L3 subss xmm0 rax cvttss2siq rdx xorq rdx rax ret instead of xmm1 cvttss2siq rcx movaps xmm2 subss xmm2 cvttss2siq rax rdx xorq rax ucomiss xmm0 cmovb rax ret Seems like the jb branch has high likelihood of being taken It would have saved a few instructions It s not possible to reference and DH registers in an instruction requiring REX prefix divb and mulb both produce results in AH If isel emits a CopyFromReg which gets turned into a movb and that can be allocated a r8b r15b To get around isel emits a CopyFromReg from AX and then right shift it down by and truncate it It s not pretty but it works We need some register allocation magic to make the hack go which would often require a callee saved register Callees usually need to keep this value live for most of their body so it doesn t add a significant burden on them We currently implement this in however this is suboptimal because it means that it would be quite awkward to implement the optimization for callers A better implementation would be to relax the LLVM IR rules for sret arguments to allow a function with an sret argument to have a non void return type
Definition: README-X86-64.txt:70
llvm::DenseMapBase::clear
void clear()
Definition: DenseMap.h:112
llvm::SmallDenseMap::swap
void swap(SmallDenseMap &RHS)
Definition: DenseMap.h:937
llvm::DenseMapIterator::reference
value_type & reference
Definition: DenseMap.h:1203
llvm::DenseMapBase::find_as
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:177
const
aarch64 promote const
Definition: AArch64PromoteConstant.cpp:232
llvm::DenseMapIterator::operator*
reference operator*() const
Definition: DenseMap.h:1235
move
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
Definition: README.txt:546
llvm::DenseMapIterator::operator!=
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1259
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::DenseMap
Definition: DenseMap.h:716
llvm::SmallDenseMap::operator=
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition: DenseMap.h:1015
llvm::pdb::Empty
@ Empty
Definition: PDBTypes.h:395
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::DenseMapIterator::operator->
pointer operator->() const
Definition: DenseMap.h:1242
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::DenseMap::DenseMap
DenseMap(unsigned InitialReserve=0)
Create a DenseMap with an optional InitialReserve that guarantee that this number of elements can be ...
Definition: DenseMap.h:732
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:152
llvm::DenseMapBase::end
const_iterator end() const
Definition: DenseMap.h:94
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::move
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1675
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
memcpy
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint cast struct s *agg result to sbyte *< sbyte * > sbyte uint cast struct s *memtmp to sbyte *< sbyte * > sbyte uint ret void llc ends up issuing two memcpy or custom lower memcpy(of small size) to be ldmia/stmia. I think option 2 is better but the current register allocator cannot allocate a chunk of registers at a time. A feasible temporary solution is to use specific physical registers at the lowering time for small(<
llvm::operator==
bool operator==(uint64_t V1, const APInt &V2)
Definition: APInt.h:1990
llvm::DenseMapBase::getEmptyKey
static const KeyT getEmptyKey()
Definition: DenseMap.h:455
llvm::DenseMapIterator::operator++
DenseMapIterator & operator++()
Definition: DenseMap.h:1264
llvm::DenseMapBase::FindAndConstruct
value_type & FindAndConstruct(const KeyT &Key)
Definition: DenseMap.h:323
llvm::DenseMapBase::insert_as
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:275
llvm::AMDGPU::HSAMD::Kernel::Arg::Key::IsConst
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
Definition: AMDGPUMetadata.h:195
llvm::DenseMap::init
void init(unsigned InitNumEntries)
Definition: DenseMap.h:794
this
Analysis the ScalarEvolution expression for r is this
Definition: README.txt:8
ValueT
Compiler.h
llvm::DenseMapBase::getTombstoneKey
static const KeyT getTombstoneKey()
Definition: DenseMap.h:461
llvm::SmallDenseMap::SmallDenseMap
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:924
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:209
llvm::DenseMapBase::empty
LLVM_NODISCARD bool empty() const
Definition: DenseMap.h:98
std
Definition: BitVector.h:851
llvm::SmallDenseMap::~SmallDenseMap
~SmallDenseMap()
Definition: DenseMap.h:932
type_traits.h
llvm::SmallDenseMap::operator=
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition: DenseMap.h:1009
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:84
llvm::DebugEpochBase
Definition: EpochTracker.h:82
llvm::DenseMapBase::size
unsigned size() const
Definition: DenseMap.h:101
LLVM_NODISCARD
#define LLVM_NODISCARD
LLVM_NODISCARD - Warn if a type or return value is discarded.
Definition: Compiler.h:155
llvm::TargetStackID::Value
Value
Definition: TargetFrameLowering.h:27
llvm::SmallDenseMap::grow
void grow(unsigned AtLeast)
Definition: DenseMap.h:1043
llvm::DenseMapBase::erase
void erase(iterator I)
Definition: DenseMap.h:315
isEqual
static bool isEqual(const Function &Caller, const Function &Callee)
Definition: Attributes.cpp:1869
llvm::DebugEpochBase::incrementEpoch
void incrementEpoch()
Definition: EpochTracker.h:84
Lookup
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
Definition: X86FloatingPoint.cpp:597
llvm::DenseMapBase::moveFromOldBuckets
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd)
Definition: DenseMap.h:397
llvm::DenseMapBase::initEmpty
void initEmpty()
Definition: DenseMap.h:375
llvm::DenseMapBase::iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition: DenseMap.h:71
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:224
llvm::DenseMapBase::FindAndConstruct
value_type & FindAndConstruct(KeyT &&Key)
Definition: DenseMap.h:335
LLVM_LIKELY
#define LLVM_LIKELY(EXPR)
Definition: Compiler.h:219
llvm::DenseMapBase::reserve
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:105
DenseMapInfo.h
EpochTracker.h
llvm::DenseMapBase::getHashValue
static unsigned getHashValue(const LookupKeyT &Val)
Definition: DenseMap.h:451
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:394
LLVM_UNLIKELY
#define LLVM_UNLIKELY(EXPR)
Definition: Compiler.h:220
llvm::DenseMapIterator::pointer
value_type * pointer
Definition: DenseMap.h:1202
llvm::isPowerOf2_64
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:496
llvm::DenseMapIterator::operator++
DenseMapIterator operator++(int)
Definition: DenseMap.h:1276
llvm::SmallDenseMap::SmallDenseMap
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:929
llvm::SmallDenseMap::SmallDenseMap
SmallDenseMap(const SmallDenseMap &other)
Definition: DenseMap.h:913
llvm::DenseMap::grow
void grow(unsigned AtLeast)
Definition: DenseMap.h:804
llvm::detail::DenseMapPair::getSecond
const ValueT & getSecond() const
Definition: DenseMap.h:48