//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};

} // end namespace detail

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
          bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }
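
  // Usage sketch (illustrative only): reserving for 48 entries allocates
  // NextPowerOf2(48 * 4 / 3 + 1) == 128 buckets, so the following 48
  // insertions cannot trigger a rehash.
  //
  //   DenseMap<unsigned, unsigned> M;
  //   M.reserve(48);
  //   for (unsigned i = 0; i != 48; ++i)
  //     M[i] = i; // no grow() during this loop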

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (std::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
    }
    setNumEntries(0);
    setNumTombstones(0);
  }
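
  // Worked instance of the shrink heuristic above (numbers illustrative): a
  // map that grew to 256 buckets but now holds 10 entries satisfies
  // 10 * 4 < 256 and 256 > 64, so clear() releases the large table through
  // shrink_and_clear() instead of merely resetting every bucket.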

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }
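
  // Usage sketch (illustrative only): lookup() is convenient when a
  // value-initialized result is an acceptable "not found" answer.
  //
  //   DenseMap<unsigned, unsigned> Counts;
  //   unsigned N = Counts.lookup(7); // N == 0 if absent; Counts is unchanged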

  // Inserts key/value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key/value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key/value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key/value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }
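
  // Usage sketch (illustrative only; assumes <string> and <cassert> are
  // available in the client code):
  //
  //   DenseMap<unsigned, std::string> M;
  //   auto R = M.try_emplace(1, "one"); // value constructed in place
  //   assert(R.second);                 // inserted
  //   R = M.try_emplace(1, "uno");      // key already present:
  //   assert(!R.second && R.first->second == "one"); // value untouched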

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality; see the worked
    // example below.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }
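
  // Worked instance (illustrative only): for NumEntries == 48 we need
  // 48 * 4 < NumBuckets * 3, i.e. NumBuckets > 64. The formula computes
  // NextPowerOf2(48 * 4 / 3 + 1) == NextPowerOf2(65) == 128, and indeed
  // 48 * 4 == 192 < 128 * 3 == 384, whereas 64 buckets would fail the
  // strict inequality (192 < 192 is false).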

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (std::is_trivially_copyable<KeyT>::value &&
        std::is_trivially_copyable<ValueT>::value)
      memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance=false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance=false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }

  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
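
  // Worked instance of the growth policy (numbers illustrative): with
  // NumBuckets == 64, inserting the 48th entry doubles the table, since
  // 48 * 4 >= 64 * 3. Separately, if those 64 buckets held 19 entries and
  // 38 tombstones, the next insertion would leave only
  // 64 - (20 + 38) == 6 <= 64 / 8 empty buckets, so grow(64) rebuilds the
  // table at the same size purely to reclaim the tombstones.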

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.
      // Return the empty bucket as the insertion point for the caller.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }
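
  // Probe-sequence sketch (illustrative only): the offset grows by one each
  // step, so starting from bucket H the probe visits H, H+1, H+3, H+6,
  // H+10, ... (triangular numbers) modulo NumBuckets. With NumBuckets == 8
  // and H == 5 that is 5, 6, 0, 3, 7, 4, 2, 1: every bucket exactly once,
  // which relies on NumBuckets being a power of two.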

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the sizes of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (auto &KV : LHS) {
    auto I = RHS.find(KV.first);
    if (I == RHS.end() || I->second != KV.second)
      return false;
  }

  return true;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};
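
// Usage sketch (illustrative only; `use` stands in for client code):
//
//   llvm::DenseMap<unsigned, unsigned> M;
//   M[1] = 10;           // default-constructs the slot, then assigns
//   M.insert({2, 20});   // returns {iterator, false} if key 2 is present
//   auto It = M.find(2);
//   if (It != M.end())
//     use(It->second);
//   M.erase(1);          // leaves a tombstone; no reallocation happens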

template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }
1018  void copyFrom(const SmallDenseMap& other) {
1019  this->destroyAll();
1020  deallocateBuckets();
1021  Small = true;
1022  if (other.getNumBuckets() > InlineBuckets) {
1023  Small = false;
1024  new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1025  }
1026  this->BaseT::copyFrom(other);
1027  }
1028 
1029  void init(unsigned InitBuckets) {
1030  Small = true;
1031  if (InitBuckets > InlineBuckets) {
1032  Small = false;
1033  new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1034  }
1035  this->BaseT::initEmpty();
1036  }

  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them; in that case we stay in the
      // small rep. Otherwise we switch to the large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
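
// Usage sketch (illustrative only): the inline buckets avoid heap allocation
// for tiny maps, and the 3/4 maximum load factor applies to them as well.
//
//   llvm::SmallDenseMap<unsigned, unsigned, 8> M; // 8 buckets stored inline
//   for (unsigned i = 0; i != 5; ++i)
//     M[i] = i;  // five entries fit in the inline representation
//   M[5] = 5;    // sixth entry: 6 * 4 >= 8 * 3, so the map moves to the heap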

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type =
      typename std::conditional<IsConst, const Bucket, Bucket>::type;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance) return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator& operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT>
inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAP_H