//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};

} // end namespace detail

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
          bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the DenseMap so that it can contain at least \p NumEntries items
  /// before resizing again.
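  ///
  /// For example (illustrative sketch; assumes an empty DenseMap<int, int> M):
  /// \code
  ///   M.reserve(100); // The next 100 insertions will not trigger grow().
  /// \endcode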
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (std::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
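  ///
  /// For example (illustrative sketch): if KeyT is std::string and the
  /// DenseMapInfo specialization (an assumption here) also provides
  /// getHashValue(StringRef) and isEqual(StringRef, const std::string &),
  /// the map can be queried without materializing a temporary std::string:
  /// \code
  ///   auto It = Map.find_as(StringRef("key"));
  /// \endcode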
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
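  ///
  /// For example (illustrative; assumes DenseMap<int, int> M containing {1, 10}):
  /// \code
  ///   int A = M.lookup(1); // 10
  ///   int B = M.lookup(2); // 0, and no entry for 2 is created.
  /// \endcode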
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
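  //
  // For example (illustrative sketch; assumes DenseMap<int, int> M):
  //   auto R = M.insert({1, 10}); // R.second == true; {1, 10} added.
  //   auto S = M.insert({1, 20}); // S.second == false; value remains 10.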
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
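  //
  // For example (illustrative sketch; assumes DenseMap<int, std::string> M):
  //   M.try_emplace(1, 3, 'x'); // Constructs std::string(3, 'x') in place,
  //                             // but only if key 1 was not already present.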
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
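  ///
  /// For example (illustrative sketch; M, K and V are placeholders):
  /// \code
  ///   const void *Old = M.getPointerIntoBucketsArray();
  ///   M.insert({K, V});
  ///   bool Reallocated = M.getPointerIntoBucketsArray() != Old;
  /// \endcode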
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict equality.
    // For example if NumEntries is 48, we need to return 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (std::is_trivially_copyable<KeyT>::value &&
        std::is_trivially_copyable<ValueT>::value)
      memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance=false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance=false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }

  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
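    //
    // (Illustrative numbers: with 64 buckets, inserting the 48th entry
    // triggers a grow to 128 buckets since 48 * 4 >= 64 * 3; if instead
    // tombstones leave 8 or fewer buckets empty, the table is rehashed at
    // the same size, grow(64), to reclaim them.)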
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.
      // Return a bucket the caller can insert into.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
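      //
      // (Illustrative note: ProbeAmt increases by one each iteration, so the
      // probe sequence visits offsets 1, 3, 6, 10, ... from the home bucket;
      // with a power-of-two bucket count this triangular-number sequence
      // eventually visits every bucket.)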
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
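///
/// For example (illustrative):
/// \code
///   DenseMap<int, int> A, B;
///   A.insert({1, 2});
///   B.insert({1, 2});
///   assert(A == B); // Same pairs compare equal regardless of bucket layout.
/// \endcode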
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (auto &KV : LHS) {
    auto I = RHS.find(KV.first);
    if (I == RHS.end() || I->second != KV.second)
      return false;
  }

  return true;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
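  ///
  /// For example (illustrative):
  /// \code
  ///   DenseMap<int, int> M(/*InitialReserve=*/16);
  ///   // Up to 16 insertions into M will not trigger grow().
  /// \endcode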
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};

template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. In that case we stay small;
      // otherwise we switch to the large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type =
      typename std::conditional<IsConst, const Bucket, Bucket>::type;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance) return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT>
inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAP_H