1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file defines the DenseMap class.
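///
/// DenseMap is a quadratically probed, open-addressed hash table whose
/// behavior is parameterized by a DenseMapInfo traits class. A minimal usage
/// sketch (illustrative only):
/// \code
///   llvm::DenseMap<int, int> Squares;
///   Squares[3] = 9;            // Inserts a default value, then assigns.
///   auto It = Squares.find(3); // Lookup never inserts.
///   if (It != Squares.end())
///     assert(It->second == 9);
/// \endcode
/// Note that the keys DenseMapInfo reserves as the empty and tombstone
/// markers must never be inserted into the map.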
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/EpochTracker.h"
19 #include "llvm/Support/AlignOf.h"
20 #include "llvm/Support/Compiler.h"
21 #include "llvm/Support/MathExtras.h"
22 #include "llvm/Support/MemAlloc.h"
23 #include "llvm/Support/ReverseIteration.h"
24 #include "llvm/Support/type_traits.h"
25 #include <algorithm>
26 #include <cassert>
27 #include <cstddef>
28 #include <cstring>
29 #include <initializer_list>
30 #include <iterator>
31 #include <new>
32 #include <type_traits>
33 #include <utility>
34 
35 namespace llvm {
36 
37 namespace detail {
38 
39 // We extend a pair to allow users to override the bucket type with their own
40 // implementation without requiring two members.
41 template <typename KeyT, typename ValueT>
42 struct DenseMapPair : public std::pair<KeyT, ValueT> {
43   using std::pair<KeyT, ValueT>::pair;
44 
45   KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
46   const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
47   ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
48   const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
49 };
50 
51 } // end namespace detail
52 
53 template <typename KeyT, typename ValueT,
54           typename KeyInfoT = DenseMapInfo<KeyT>,
55           typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56           bool IsConst = false>
57 class DenseMapIterator;
58 
59 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60           typename BucketT>
61 class DenseMapBase : public DebugEpochBase {
62   template <typename T>
63   using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64 
65 public:
66   using size_type = unsigned;
67   using key_type = KeyT;
68   using mapped_type = ValueT;
69   using value_type = BucketT;
70 
71   using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72   using const_iterator =
73       DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74 
75   inline iterator begin() {
76     // When the map is empty, avoid the overhead of advancing/retreating past
77     // empty buckets.
78     if (empty())
79       return end();
80     if (shouldReverseIterate<KeyT>())
81       return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82     return makeIterator(getBuckets(), getBucketsEnd(), *this);
83   }
84   inline iterator end() {
85     return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86   }
87   inline const_iterator begin() const {
88     if (empty())
89       return end();
90     if (shouldReverseIterate<KeyT>())
91       return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92     return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93   }
94   inline const_iterator end() const {
95     return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96   }
97 
98   [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
99   unsigned size() const { return getNumEntries(); }
100 
  /// Grow the DenseMap so that it can contain at least \p NumEntries items
  /// before resizing again.
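  ///
  /// A small sketch of the intended use (illustrative only):
  /// \code
  ///   llvm::DenseMap<unsigned, unsigned> Squares;
  ///   Squares.reserve(100); // No rehashing for the next 100 insertions.
  ///   for (unsigned I = 0; I != 100; ++I)
  ///     Squares[I] = I * I;
  /// \endcode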
103   void reserve(size_type NumEntries) {
104     auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
105     incrementEpoch();
106     if (NumBuckets > getNumBuckets())
107       grow(NumBuckets);
108   }
109 
110   void clear() {
111     incrementEpoch();
112     if (getNumEntries() == 0 && getNumTombstones() == 0) return;
113 
114     // If the capacity of the array is huge, and the # elements used is small,
115     // shrink the array.
116     if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
117       shrink_and_clear();
118       return;
119     }
120 
121     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
122     if (std::is_trivially_destructible<ValueT>::value) {
123       // Use a simpler loop when values don't need destruction.
124       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
125         P->getFirst() = EmptyKey;
126     } else {
127       unsigned NumEntries = getNumEntries();
128       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
129         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
130           if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
131             P->getSecond().~ValueT();
132             --NumEntries;
133           }
134           P->getFirst() = EmptyKey;
135         }
136       }
137       assert(NumEntries == 0 && "Node count imbalance!");
138       (void)NumEntries;
139     }
140     setNumEntries(0);
141     setNumTombstones(0);
142   }
143 
144   /// Return true if the specified key is in the map, false otherwise.
145   bool contains(const_arg_type_t<KeyT> Val) const {
146     const BucketT *TheBucket;
147     return LookupBucketFor(Val, TheBucket);
148   }
149 
150   /// Return 1 if the specified key is in the map, 0 otherwise.
151   size_type count(const_arg_type_t<KeyT> Val) const {
152     return contains(Val) ? 1 : 0;
153   }
154 
155   iterator find(const_arg_type_t<KeyT> Val) {
156     BucketT *TheBucket;
157     if (LookupBucketFor(Val, TheBucket))
158       return makeIterator(TheBucket,
159                           shouldReverseIterate<KeyT>() ? getBuckets()
160                                                        : getBucketsEnd(),
161                           *this, true);
162     return end();
163   }
164   const_iterator find(const_arg_type_t<KeyT> Val) const {
165     const BucketT *TheBucket;
166     if (LookupBucketFor(Val, TheBucket))
167       return makeConstIterator(TheBucket,
168                                shouldReverseIterate<KeyT>() ? getBuckets()
169                                                             : getBucketsEnd(),
170                                *this, true);
171     return end();
172   }
173 
174   /// Alternate version of find() which allows a different, and possibly
175   /// less expensive, key type.
176   /// The DenseMapInfo is responsible for supplying methods
177   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
178   /// type used.
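  ///
  /// For example, a map keyed by std::string could (hypothetically) be queried
  /// with a StringRef, assuming a DenseMapInfo<std::string> specialization
  /// that also provides getHashValue(StringRef) and
  /// isEqual(StringRef, const std::string &) consistent with the std::string
  /// overloads (illustrative sketch only):
  /// \code
  ///   llvm::DenseMap<std::string, int> M;
  ///   llvm::StringRef Name("example");
  ///   auto It = M.find_as(Name); // No temporary std::string is constructed.
  /// \endcode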
179   template<class LookupKeyT>
180   iterator find_as(const LookupKeyT &Val) {
181     BucketT *TheBucket;
182     if (LookupBucketFor(Val, TheBucket))
183       return makeIterator(TheBucket,
184                           shouldReverseIterate<KeyT>() ? getBuckets()
185                                                        : getBucketsEnd(),
186                           *this, true);
187     return end();
188   }
189   template<class LookupKeyT>
190   const_iterator find_as(const LookupKeyT &Val) const {
191     const BucketT *TheBucket;
192     if (LookupBucketFor(Val, TheBucket))
193       return makeConstIterator(TheBucket,
194                                shouldReverseIterate<KeyT>() ? getBuckets()
195                                                             : getBucketsEnd(),
196                                *this, true);
197     return end();
198   }
199 
200   /// lookup - Return the entry for the specified key, or a default
201   /// constructed value if no such entry exists.
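  ///
  /// Unlike operator[], lookup() never inserts (illustrative):
  /// \code
  ///   llvm::DenseMap<int, int> M;
  ///   int V = M.lookup(42); // V == 0; M is still empty.
  ///   M[42] = 7;            // operator[] default-constructs, then assigns.
  /// \endcode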
202   ValueT lookup(const_arg_type_t<KeyT> Val) const {
203     const BucketT *TheBucket;
204     if (LookupBucketFor(Val, TheBucket))
205       return TheBucket->getSecond();
206     return ValueT();
207   }
208 
209   /// at - Return the entry for the specified key, or abort if no such
210   /// entry exists.
211   const ValueT &at(const_arg_type_t<KeyT> Val) const {
212     auto Iter = this->find(std::move(Val));
213     assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
214     return Iter->second;
215   }
216 
  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
220   std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
221     return try_emplace(KV.first, KV.second);
222   }
223 
  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
227   std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
228     return try_emplace(std::move(KV.first), std::move(KV.second));
229   }
230 
  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place only if the key is not already in the
  // map; otherwise the key and value arguments are not moved from.
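  //
  // Illustrative use (a sketch; assumes C++17 and <string>):
  //   llvm::DenseMap<int, std::string> M;
  //   auto [It, Inserted] = M.try_emplace(1, "one");   // Constructs in place.
  //   auto [It2, Inserted2] = M.try_emplace(1, "uno"); // Inserted2 == false;
  //                                                    // value stays "one".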
234   template <typename... Ts>
235   std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
236     BucketT *TheBucket;
237     if (LookupBucketFor(Key, TheBucket))
238       return std::make_pair(makeIterator(TheBucket,
239                                          shouldReverseIterate<KeyT>()
240                                              ? getBuckets()
241                                              : getBucketsEnd(),
242                                          *this, true),
243                             false); // Already in map.
244 
245     // Otherwise, insert the new element.
246     TheBucket =
247         InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
248     return std::make_pair(makeIterator(TheBucket,
249                                        shouldReverseIterate<KeyT>()
250                                            ? getBuckets()
251                                            : getBucketsEnd(),
252                                        *this, true),
253                           true);
254   }
255 
  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place only if the key is not already in the
  // map; otherwise the key and value arguments are not moved from.
259   template <typename... Ts>
260   std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
261     BucketT *TheBucket;
262     if (LookupBucketFor(Key, TheBucket))
263       return std::make_pair(makeIterator(TheBucket,
264                                          shouldReverseIterate<KeyT>()
265                                              ? getBuckets()
266                                              : getBucketsEnd(),
267                                          *this, true),
268                             false); // Already in map.
269 
270     // Otherwise, insert the new element.
271     TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
272     return std::make_pair(makeIterator(TheBucket,
273                                        shouldReverseIterate<KeyT>()
274                                            ? getBuckets()
275                                            : getBucketsEnd(),
276                                        *this, true),
277                           true);
278   }
279 
280   /// Alternate version of insert() which allows a different, and possibly
281   /// less expensive, key type.
282   /// The DenseMapInfo is responsible for supplying methods
283   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
284   /// type used.
285   template <typename LookupKeyT>
286   std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
287                                       const LookupKeyT &Val) {
288     BucketT *TheBucket;
289     if (LookupBucketFor(Val, TheBucket))
290       return std::make_pair(makeIterator(TheBucket,
291                                          shouldReverseIterate<KeyT>()
292                                              ? getBuckets()
293                                              : getBucketsEnd(),
294                                          *this, true),
295                             false); // Already in map.
296 
297     // Otherwise, insert the new element.
298     TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
299                                            std::move(KV.second), Val);
300     return std::make_pair(makeIterator(TheBucket,
301                                        shouldReverseIterate<KeyT>()
302                                            ? getBuckets()
303                                            : getBucketsEnd(),
304                                        *this, true),
305                           true);
306   }
307 
308   /// insert - Range insertion of pairs.
309   template<typename InputIt>
310   void insert(InputIt I, InputIt E) {
311     for (; I != E; ++I)
312       insert(*I);
313   }
314 
  /// Returns a reference to the value associated with the key if it is already
  /// in the map. If it is not, emplaces a default-constructed value for the
  /// key and returns a reference to the newly created value.
318   ValueT &getOrInsertDefault(KeyT &&Key) {
    return try_emplace(std::move(Key)).first->second;
320   }
321 
  /// Returns a reference to the value associated with the key if it is already
  /// in the map. If it is not, emplaces a default-constructed value for the
  /// key and returns a reference to the newly created value.
325   ValueT &getOrInsertDefault(const KeyT &Key) {
326     return try_emplace(Key).first->second;
327   }
328 
329   bool erase(const KeyT &Val) {
330     BucketT *TheBucket;
331     if (!LookupBucketFor(Val, TheBucket))
332       return false; // not in map.
333 
334     TheBucket->getSecond().~ValueT();
335     TheBucket->getFirst() = getTombstoneKey();
336     decrementNumEntries();
337     incrementNumTombstones();
338     return true;
339   }
340   void erase(iterator I) {
341     BucketT *TheBucket = &*I;
342     TheBucket->getSecond().~ValueT();
343     TheBucket->getFirst() = getTombstoneKey();
344     decrementNumEntries();
345     incrementNumTombstones();
346   }
347 
348   value_type& FindAndConstruct(const KeyT &Key) {
349     BucketT *TheBucket;
350     if (LookupBucketFor(Key, TheBucket))
351       return *TheBucket;
352 
353     return *InsertIntoBucket(TheBucket, Key);
354   }
355 
356   ValueT &operator[](const KeyT &Key) {
357     return FindAndConstruct(Key).second;
358   }
359 
360   value_type& FindAndConstruct(KeyT &&Key) {
361     BucketT *TheBucket;
362     if (LookupBucketFor(Key, TheBucket))
363       return *TheBucket;
364 
365     return *InsertIntoBucket(TheBucket, std::move(Key));
366   }
367 
368   ValueT &operator[](KeyT &&Key) {
369     return FindAndConstruct(std::move(Key)).second;
370   }
371 
372   /// isPointerIntoBucketsArray - Return true if the specified pointer points
373   /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
374   /// value in the DenseMap).
375   bool isPointerIntoBucketsArray(const void *Ptr) const {
376     return Ptr >= getBuckets() && Ptr < getBucketsEnd();
377   }
378 
379   /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
380   /// array.  In conjunction with the previous method, this can be used to
381   /// determine whether an insertion caused the DenseMap to reallocate.
382   const void *getPointerIntoBucketsArray() const { return getBuckets(); }
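
  // Taken together, these two methods can be used to detect whether an
  // insertion reallocated the table, e.g. (illustrative sketch; M and K are
  // placeholders for a map and a key):
  //
  //   const void *OldArray = M.getPointerIntoBucketsArray();
  //   M.try_emplace(K, 0);
  //   bool Reallocated = !M.isPointerIntoBucketsArray(OldArray);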
383 
384 protected:
385   DenseMapBase() = default;
386 
387   void destroyAll() {
388     if (getNumBuckets() == 0) // Nothing to do.
389       return;
390 
391     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
392     for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
393       if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
394           !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
395         P->getSecond().~ValueT();
396       P->getFirst().~KeyT();
397     }
398   }
399 
400   void initEmpty() {
401     setNumEntries(0);
402     setNumTombstones(0);
403 
404     assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
405            "# initial buckets must be a power of two!");
406     const KeyT EmptyKey = getEmptyKey();
407     for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
408       ::new (&B->getFirst()) KeyT(EmptyKey);
409   }
410 
411   /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
413   unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
414     // Ensure that "NumEntries * 4 < NumBuckets * 3"
415     if (NumEntries == 0)
416       return 0;
    // +1 is required because the inequality above is strict.
    // For example, if NumEntries is 48, then 48 * 4 / 3 == 64 and we need
    // NumBuckets > 64, so we return NextPowerOf2(65) == 128.
419     return NextPowerOf2(NumEntries * 4 / 3 + 1);
420   }
421 
422   void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
423     initEmpty();
424 
425     // Insert all the old elements.
426     const KeyT EmptyKey = getEmptyKey();
427     const KeyT TombstoneKey = getTombstoneKey();
428     for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
429       if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
430           !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
431         // Insert the key/value into the new table.
432         BucketT *DestBucket;
433         bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
434         (void)FoundVal; // silence warning.
435         assert(!FoundVal && "Key already in new map?");
436         DestBucket->getFirst() = std::move(B->getFirst());
437         ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
438         incrementNumEntries();
439 
440         // Free the value.
441         B->getSecond().~ValueT();
442       }
443       B->getFirst().~KeyT();
444     }
445   }
446 
447   template <typename OtherBaseT>
448   void copyFrom(
449       const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
450     assert(&other != this);
451     assert(getNumBuckets() == other.getNumBuckets());
452 
453     setNumEntries(other.getNumEntries());
454     setNumTombstones(other.getNumTombstones());
455 
456     if (std::is_trivially_copyable<KeyT>::value &&
457         std::is_trivially_copyable<ValueT>::value)
458       memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
459              getNumBuckets() * sizeof(BucketT));
460     else
461       for (size_t i = 0; i < getNumBuckets(); ++i) {
462         ::new (&getBuckets()[i].getFirst())
463             KeyT(other.getBuckets()[i].getFirst());
464         if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
465             !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
466           ::new (&getBuckets()[i].getSecond())
467               ValueT(other.getBuckets()[i].getSecond());
468       }
469   }
470 
471   static unsigned getHashValue(const KeyT &Val) {
472     return KeyInfoT::getHashValue(Val);
473   }
474 
475   template<typename LookupKeyT>
476   static unsigned getHashValue(const LookupKeyT &Val) {
477     return KeyInfoT::getHashValue(Val);
478   }
479 
480   static const KeyT getEmptyKey() {
481     static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
482                   "Must pass the derived type to this template!");
483     return KeyInfoT::getEmptyKey();
484   }
485 
486   static const KeyT getTombstoneKey() {
487     return KeyInfoT::getTombstoneKey();
488   }
489 
490 private:
491   iterator makeIterator(BucketT *P, BucketT *E,
492                         DebugEpochBase &Epoch,
493                         bool NoAdvance=false) {
494     if (shouldReverseIterate<KeyT>()) {
495       BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
496       return iterator(B, E, Epoch, NoAdvance);
497     }
498     return iterator(P, E, Epoch, NoAdvance);
499   }
500 
501   const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
502                                    const DebugEpochBase &Epoch,
503                                    const bool NoAdvance=false) const {
504     if (shouldReverseIterate<KeyT>()) {
505       const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
506       return const_iterator(B, E, Epoch, NoAdvance);
507     }
508     return const_iterator(P, E, Epoch, NoAdvance);
509   }
510 
511   unsigned getNumEntries() const {
512     return static_cast<const DerivedT *>(this)->getNumEntries();
513   }
514 
515   void setNumEntries(unsigned Num) {
516     static_cast<DerivedT *>(this)->setNumEntries(Num);
517   }
518 
519   void incrementNumEntries() {
520     setNumEntries(getNumEntries() + 1);
521   }
522 
523   void decrementNumEntries() {
524     setNumEntries(getNumEntries() - 1);
525   }
526 
527   unsigned getNumTombstones() const {
528     return static_cast<const DerivedT *>(this)->getNumTombstones();
529   }
530 
531   void setNumTombstones(unsigned Num) {
532     static_cast<DerivedT *>(this)->setNumTombstones(Num);
533   }
534 
535   void incrementNumTombstones() {
536     setNumTombstones(getNumTombstones() + 1);
537   }
538 
539   void decrementNumTombstones() {
540     setNumTombstones(getNumTombstones() - 1);
541   }
542 
543   const BucketT *getBuckets() const {
544     return static_cast<const DerivedT *>(this)->getBuckets();
545   }
546 
547   BucketT *getBuckets() {
548     return static_cast<DerivedT *>(this)->getBuckets();
549   }
550 
551   unsigned getNumBuckets() const {
552     return static_cast<const DerivedT *>(this)->getNumBuckets();
553   }
554 
555   BucketT *getBucketsEnd() {
556     return getBuckets() + getNumBuckets();
557   }
558 
559   const BucketT *getBucketsEnd() const {
560     return getBuckets() + getNumBuckets();
561   }
562 
563   void grow(unsigned AtLeast) {
564     static_cast<DerivedT *>(this)->grow(AtLeast);
565   }
566 
567   void shrink_and_clear() {
568     static_cast<DerivedT *>(this)->shrink_and_clear();
569   }
570 
571   template <typename KeyArg, typename... ValueArgs>
572   BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
573                             ValueArgs &&... Values) {
574     TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
575 
576     TheBucket->getFirst() = std::forward<KeyArg>(Key);
577     ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
578     return TheBucket;
579   }
580 
581   template <typename LookupKeyT>
582   BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
583                                       ValueT &&Value, LookupKeyT &Lookup) {
584     TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
585 
586     TheBucket->getFirst() = std::move(Key);
587     ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
588     return TheBucket;
589   }
590 
591   template <typename LookupKeyT>
592   BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
593                                 BucketT *TheBucket) {
594     incrementEpoch();
595 
596     // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
597     // the buckets are empty (meaning that many are filled with tombstones),
598     // grow the table.
599     //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until they found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
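    //
    // As a worked example (illustrative): with NumBuckets == 64, the first
    // branch below fires once NewNumEntries * 4 >= 192, i.e. on the insertion
    // that brings the map to 48 entries.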
605     unsigned NewNumEntries = getNumEntries() + 1;
606     unsigned NumBuckets = getNumBuckets();
607     if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
608       this->grow(NumBuckets * 2);
609       LookupBucketFor(Lookup, TheBucket);
610       NumBuckets = getNumBuckets();
611     } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
612                              NumBuckets/8)) {
613       this->grow(NumBuckets);
614       LookupBucketFor(Lookup, TheBucket);
615     }
616     assert(TheBucket);
617 
618     // Only update the state after we've grown our bucket space appropriately
619     // so that when growing buckets we have self-consistent entry count.
620     incrementNumEntries();
621 
622     // If we are writing over a tombstone, remember this.
623     const KeyT EmptyKey = getEmptyKey();
624     if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
625       decrementNumTombstones();
626 
627     return TheBucket;
628   }
629 
  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true.  Otherwise it returns false and sets FoundBucket to the bucket the
  /// key should be inserted into (one holding an empty marker or a tombstone).
634   template<typename LookupKeyT>
635   bool LookupBucketFor(const LookupKeyT &Val,
636                        const BucketT *&FoundBucket) const {
637     const BucketT *BucketsPtr = getBuckets();
638     const unsigned NumBuckets = getNumBuckets();
639 
640     if (NumBuckets == 0) {
641       FoundBucket = nullptr;
642       return false;
643     }
644 
645     // FoundTombstone - Keep track of whether we find a tombstone while probing.
646     const BucketT *FoundTombstone = nullptr;
647     const KeyT EmptyKey = getEmptyKey();
648     const KeyT TombstoneKey = getTombstoneKey();
649     assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
650            !KeyInfoT::isEqual(Val, TombstoneKey) &&
651            "Empty/Tombstone value shouldn't be inserted into map!");
652 
653     unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
654     unsigned ProbeAmt = 1;
655     while (true) {
656       const BucketT *ThisBucket = BucketsPtr + BucketNo;
657       // Found Val's bucket?  If so, return it.
658       if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
659         FoundBucket = ThisBucket;
660         return true;
661       }
662 
      // If we found an empty bucket, the key doesn't exist in the map; report
      // the bucket the caller should use for an insertion.
665       if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
666         // If we've already seen a tombstone while probing, fill it in instead
667         // of the empty bucket we eventually probed to.
668         FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
669         return false;
670       }
671 
      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than a bucket that would require more
      // probing.
674       if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
675           !FoundTombstone)
676         FoundTombstone = ThisBucket;  // Remember the first tombstone found.
677 
      // Otherwise, this is a hash collision or a tombstone; continue quadratic
      // probing.
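      // (With ProbeAmt starting at 1, the probe sequence visits H, H+1, H+3,
      // H+6, ... modulo NumBuckets, i.e. the offsets are the triangular
      // numbers; because NumBuckets is a power of two, this sequence visits
      // every bucket before repeating.)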
680       BucketNo += ProbeAmt++;
681       BucketNo &= (NumBuckets-1);
682     }
683   }
684 
685   template <typename LookupKeyT>
686   bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
687     const BucketT *ConstFoundBucket;
688     bool Result = const_cast<const DenseMapBase *>(this)
689       ->LookupBucketFor(Val, ConstFoundBucket);
690     FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
691     return Result;
692   }
693 
694 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
699   size_t getMemorySize() const {
700     return getNumBuckets() * sizeof(BucketT);
701   }
702 };
703 
704 /// Equality comparison for DenseMap.
705 ///
706 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
707 /// is also in RHS, and that no additional pairs are in RHS.
708 /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
709 /// complexity is linear, worst case is O(N^2) (if every hash collides).
710 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
711           typename BucketT>
712 bool operator==(
713     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
714     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
715   if (LHS.size() != RHS.size())
716     return false;
717 
718   for (auto &KV : LHS) {
719     auto I = RHS.find(KV.first);
720     if (I == RHS.end() || I->second != KV.second)
721       return false;
722   }
723 
724   return true;
725 }
726 
727 /// Inequality comparison for DenseMap.
728 ///
729 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
730 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
731           typename BucketT>
732 bool operator!=(
733     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
734     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
735   return !(LHS == RHS);
736 }
737 
738 template <typename KeyT, typename ValueT,
739           typename KeyInfoT = DenseMapInfo<KeyT>,
740           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
741 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
742                                      KeyT, ValueT, KeyInfoT, BucketT> {
743   friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
744 
745   // Lift some types from the dependent base class into this class for
746   // simplicity of referring to them.
747   using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
748 
749   BucketT *Buckets;
750   unsigned NumEntries;
751   unsigned NumTombstones;
752   unsigned NumBuckets;
753 
754 public:
  /// Create a DenseMap with an optional \p InitialReserve, which guarantees
  /// that this number of elements can be inserted into the map without
  /// triggering grow().
757   explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
758 
759   DenseMap(const DenseMap &other) : BaseT() {
760     init(0);
761     copyFrom(other);
762   }
763 
764   DenseMap(DenseMap &&other) : BaseT() {
765     init(0);
766     swap(other);
767   }
768 
769   template<typename InputIt>
770   DenseMap(const InputIt &I, const InputIt &E) {
771     init(std::distance(I, E));
772     this->insert(I, E);
773   }
774 
775   DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
776     init(Vals.size());
777     this->insert(Vals.begin(), Vals.end());
778   }
779 
780   ~DenseMap() {
781     this->destroyAll();
782     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
783   }
784 
785   void swap(DenseMap& RHS) {
786     this->incrementEpoch();
787     RHS.incrementEpoch();
788     std::swap(Buckets, RHS.Buckets);
789     std::swap(NumEntries, RHS.NumEntries);
790     std::swap(NumTombstones, RHS.NumTombstones);
791     std::swap(NumBuckets, RHS.NumBuckets);
792   }
793 
794   DenseMap& operator=(const DenseMap& other) {
795     if (&other != this)
796       copyFrom(other);
797     return *this;
798   }
799 
800   DenseMap& operator=(DenseMap &&other) {
801     this->destroyAll();
802     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
803     init(0);
804     swap(other);
805     return *this;
806   }
807 
808   void copyFrom(const DenseMap& other) {
809     this->destroyAll();
810     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
811     if (allocateBuckets(other.NumBuckets)) {
812       this->BaseT::copyFrom(other);
813     } else {
814       NumEntries = 0;
815       NumTombstones = 0;
816     }
817   }
818 
819   void init(unsigned InitNumEntries) {
820     auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
821     if (allocateBuckets(InitBuckets)) {
822       this->BaseT::initEmpty();
823     } else {
824       NumEntries = 0;
825       NumTombstones = 0;
826     }
827   }
828 
829   void grow(unsigned AtLeast) {
830     unsigned OldNumBuckets = NumBuckets;
831     BucketT *OldBuckets = Buckets;
832 
    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
834     assert(Buckets);
835     if (!OldBuckets) {
836       this->BaseT::initEmpty();
837       return;
838     }
839 
840     this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
841 
842     // Free the old table.
843     deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
844                       alignof(BucketT));
845   }
846 
847   void shrink_and_clear() {
848     unsigned OldNumBuckets = NumBuckets;
849     unsigned OldNumEntries = NumEntries;
850     this->destroyAll();
851 
852     // Reduce the number of buckets.
853     unsigned NewNumBuckets = 0;
854     if (OldNumEntries)
855       NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
856     if (NewNumBuckets == NumBuckets) {
857       this->BaseT::initEmpty();
858       return;
859     }
860 
861     deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
862                       alignof(BucketT));
863     init(NewNumBuckets);
864   }
865 
866 private:
867   unsigned getNumEntries() const {
868     return NumEntries;
869   }
870 
871   void setNumEntries(unsigned Num) {
872     NumEntries = Num;
873   }
874 
875   unsigned getNumTombstones() const {
876     return NumTombstones;
877   }
878 
879   void setNumTombstones(unsigned Num) {
880     NumTombstones = Num;
881   }
882 
883   BucketT *getBuckets() const {
884     return Buckets;
885   }
886 
887   unsigned getNumBuckets() const {
888     return NumBuckets;
889   }
890 
891   bool allocateBuckets(unsigned Num) {
892     NumBuckets = Num;
893     if (NumBuckets == 0) {
894       Buckets = nullptr;
895       return false;
896     }
897 
898     Buckets = static_cast<BucketT *>(
899         allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
900     return true;
901   }
902 };
903 
904 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
905           typename KeyInfoT = DenseMapInfo<KeyT>,
906           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
907 class SmallDenseMap
908     : public DenseMapBase<
909           SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
910           ValueT, KeyInfoT, BucketT> {
911   friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
912 
913   // Lift some types from the dependent base class into this class for
914   // simplicity of referring to them.
915   using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
916 
917   static_assert(isPowerOf2_64(InlineBuckets),
918                 "InlineBuckets must be a power of 2.");
919 
920   unsigned Small : 1;
921   unsigned NumEntries : 31;
922   unsigned NumTombstones;
923 
924   struct LargeRep {
925     BucketT *Buckets;
926     unsigned NumBuckets;
927   };
928 
929   /// A "union" of an inline bucket array and the struct representing
930   /// a large bucket. This union will be discriminated by the 'Small' bit.
931   AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
932 
933 public:
934   explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
935     if (NumInitBuckets > InlineBuckets)
936       NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
937     init(NumInitBuckets);
938   }
939 
940   SmallDenseMap(const SmallDenseMap &other) : BaseT() {
941     init(0);
942     copyFrom(other);
943   }
944 
945   SmallDenseMap(SmallDenseMap &&other) : BaseT() {
946     init(0);
947     swap(other);
948   }
949 
950   template<typename InputIt>
951   SmallDenseMap(const InputIt &I, const InputIt &E) {
952     init(NextPowerOf2(std::distance(I, E)));
953     this->insert(I, E);
954   }
955 
956   SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
957       : SmallDenseMap(Vals.begin(), Vals.end()) {}
958 
959   ~SmallDenseMap() {
960     this->destroyAll();
961     deallocateBuckets();
962   }
963 
964   void swap(SmallDenseMap& RHS) {
965     unsigned TmpNumEntries = RHS.NumEntries;
966     RHS.NumEntries = NumEntries;
967     NumEntries = TmpNumEntries;
968     std::swap(NumTombstones, RHS.NumTombstones);
969 
970     const KeyT EmptyKey = this->getEmptyKey();
971     const KeyT TombstoneKey = this->getTombstoneKey();
972     if (Small && RHS.Small) {
973       // If we're swapping inline bucket arrays, we have to cope with some of
974       // the tricky bits of DenseMap's storage system: the buckets are not
975       // fully initialized. Thus we swap every key, but we may have
976       // a one-directional move of the value.
977       for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
978         BucketT *LHSB = &getInlineBuckets()[i],
979                 *RHSB = &RHS.getInlineBuckets()[i];
980         bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
981                             !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
982         bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
983                             !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
984         if (hasLHSValue && hasRHSValue) {
985           // Swap together if we can...
986           std::swap(*LHSB, *RHSB);
987           continue;
988         }
989         // Swap separately and handle any asymmetry.
990         std::swap(LHSB->getFirst(), RHSB->getFirst());
991         if (hasLHSValue) {
992           ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
993           LHSB->getSecond().~ValueT();
994         } else if (hasRHSValue) {
995           ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
996           RHSB->getSecond().~ValueT();
997         }
998       }
999       return;
1000     }
1001     if (!Small && !RHS.Small) {
1002       std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
1003       std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
1004       return;
1005     }
1006 
1007     SmallDenseMap &SmallSide = Small ? *this : RHS;
1008     SmallDenseMap &LargeSide = Small ? RHS : *this;
1009 
1010     // First stash the large side's rep and move the small side across.
1011     LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
1012     LargeSide.getLargeRep()->~LargeRep();
1013     LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually changed in this case.  So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
1018     for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1019       BucketT *NewB = &LargeSide.getInlineBuckets()[i],
1020               *OldB = &SmallSide.getInlineBuckets()[i];
1021       ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
1022       OldB->getFirst().~KeyT();
1023       if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
1024           !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1025         ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1026         OldB->getSecond().~ValueT();
1027       }
1028     }
1029 
1030     // The hard part of moving the small buckets across is done, just move
1031     // the TmpRep into its new home.
1032     SmallSide.Small = false;
1033     new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1034   }
1035 
1036   SmallDenseMap& operator=(const SmallDenseMap& other) {
1037     if (&other != this)
1038       copyFrom(other);
1039     return *this;
1040   }
1041 
1042   SmallDenseMap& operator=(SmallDenseMap &&other) {
1043     this->destroyAll();
1044     deallocateBuckets();
1045     init(0);
1046     swap(other);
1047     return *this;
1048   }
1049 
1050   void copyFrom(const SmallDenseMap& other) {
1051     this->destroyAll();
1052     deallocateBuckets();
1053     Small = true;
1054     if (other.getNumBuckets() > InlineBuckets) {
1055       Small = false;
1056       new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1057     }
1058     this->BaseT::copyFrom(other);
1059   }
1060 
1061   void init(unsigned InitBuckets) {
1062     Small = true;
1063     if (InitBuckets > InlineBuckets) {
1064       Small = false;
1065       new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1066     }
1067     this->BaseT::initEmpty();
1068   }
1069 
1070   void grow(unsigned AtLeast) {
1071     if (AtLeast > InlineBuckets)
1072       AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1073 
1074     if (Small) {
1075       // First move the inline buckets into a temporary storage.
1076       AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1077       BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1078       BucketT *TmpEnd = TmpBegin;
1079 
1080       // Loop over the buckets, moving non-empty, non-tombstones into the
1081       // temporary storage. Have the loop move the TmpEnd forward as it goes.
1082       const KeyT EmptyKey = this->getEmptyKey();
1083       const KeyT TombstoneKey = this->getTombstoneKey();
1084       for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1085         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1086             !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1087           assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1088                  "Too many inline buckets!");
1089           ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1090           ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1091           ++TmpEnd;
1092           P->getSecond().~ValueT();
1093         }
1094         P->getFirst().~KeyT();
1095       }
1096 
      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them; in that case we stay in the small
      // rep.  Otherwise we switch to the large rep here.
1100       if (AtLeast > InlineBuckets) {
1101         Small = false;
1102         new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1103       }
1104       this->moveFromOldBuckets(TmpBegin, TmpEnd);
1105       return;
1106     }
1107 
1108     LargeRep OldRep = std::move(*getLargeRep());
1109     getLargeRep()->~LargeRep();
1110     if (AtLeast <= InlineBuckets) {
1111       Small = true;
1112     } else {
1113       new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1114     }
1115 
1116     this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1117 
1118     // Free the old table.
1119     deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1120                       alignof(BucketT));
1121   }
1122 
1123   void shrink_and_clear() {
1124     unsigned OldSize = this->size();
1125     this->destroyAll();
1126 
1127     // Reduce the number of buckets.
1128     unsigned NewNumBuckets = 0;
1129     if (OldSize) {
1130       NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1131       if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1132         NewNumBuckets = 64;
1133     }
1134     if ((Small && NewNumBuckets <= InlineBuckets) ||
1135         (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1136       this->BaseT::initEmpty();
1137       return;
1138     }
1139 
1140     deallocateBuckets();
1141     init(NewNumBuckets);
1142   }
1143 
1144 private:
1145   unsigned getNumEntries() const {
1146     return NumEntries;
1147   }
1148 
1149   void setNumEntries(unsigned Num) {
1150     // NumEntries is hardcoded to be 31 bits wide.
1151     assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1152     NumEntries = Num;
1153   }
1154 
1155   unsigned getNumTombstones() const {
1156     return NumTombstones;
1157   }
1158 
1159   void setNumTombstones(unsigned Num) {
1160     NumTombstones = Num;
1161   }
1162 
1163   const BucketT *getInlineBuckets() const {
1164     assert(Small);
1165     // Note that this cast does not violate aliasing rules as we assert that
1166     // the memory's dynamic type is the small, inline bucket buffer, and the
1167     // 'storage' is a POD containing a char buffer.
1168     return reinterpret_cast<const BucketT *>(&storage);
1169   }
1170 
1171   BucketT *getInlineBuckets() {
1172     return const_cast<BucketT *>(
1173       const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1174   }
1175 
1176   const LargeRep *getLargeRep() const {
1177     assert(!Small);
1178     // Note, same rule about aliasing as with getInlineBuckets.
1179     return reinterpret_cast<const LargeRep *>(&storage);
1180   }
1181 
1182   LargeRep *getLargeRep() {
1183     return const_cast<LargeRep *>(
1184       const_cast<const SmallDenseMap *>(this)->getLargeRep());
1185   }
1186 
1187   const BucketT *getBuckets() const {
1188     return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1189   }
1190 
1191   BucketT *getBuckets() {
1192     return const_cast<BucketT *>(
1193       const_cast<const SmallDenseMap *>(this)->getBuckets());
1194   }
1195 
1196   unsigned getNumBuckets() const {
1197     return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1198   }
1199 
1200   void deallocateBuckets() {
1201     if (Small)
1202       return;
1203 
1204     deallocate_buffer(getLargeRep()->Buckets,
1205                       sizeof(BucketT) * getLargeRep()->NumBuckets,
1206                       alignof(BucketT));
1207     getLargeRep()->~LargeRep();
1208   }
1209 
1210   LargeRep allocateBuckets(unsigned Num) {
1211     assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1212     LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1213                         sizeof(BucketT) * Num, alignof(BucketT))),
1214                     Num};
1215     return Rep;
1216   }
1217 };
1218 
1219 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1220           bool IsConst>
1221 class DenseMapIterator : DebugEpochBase::HandleBase {
1222   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1223   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1224 
1225 public:
1226   using difference_type = ptrdiff_t;
1227   using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1228   using pointer = value_type *;
1229   using reference = value_type &;
1230   using iterator_category = std::forward_iterator_tag;
1231 
1232 private:
1233   pointer Ptr = nullptr;
1234   pointer End = nullptr;
1235 
1236 public:
1237   DenseMapIterator() = default;
1238 
1239   DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1240                    bool NoAdvance = false)
1241       : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1242     assert(isHandleInSync() && "invalid construction!");
1243 
1244     if (NoAdvance) return;
1245     if (shouldReverseIterate<KeyT>()) {
1246       RetreatPastEmptyBuckets();
1247       return;
1248     }
1249     AdvancePastEmptyBuckets();
1250   }
1251 
  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // when the source iterator is already const so that it doesn't end up as a
  // user-defined copy constructor.
1255   template <bool IsConstSrc,
1256             typename = std::enable_if_t<!IsConstSrc && IsConst>>
1257   DenseMapIterator(
1258       const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1259       : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1260 
1261   reference operator*() const {
1262     assert(isHandleInSync() && "invalid iterator access!");
1263     assert(Ptr != End && "dereferencing end() iterator");
1264     if (shouldReverseIterate<KeyT>())
1265       return Ptr[-1];
1266     return *Ptr;
1267   }
1268   pointer operator->() const {
1269     assert(isHandleInSync() && "invalid iterator access!");
1270     assert(Ptr != End && "dereferencing end() iterator");
1271     if (shouldReverseIterate<KeyT>())
1272       return &(Ptr[-1]);
1273     return Ptr;
1274   }
1275 
1276   friend bool operator==(const DenseMapIterator &LHS,
1277                          const DenseMapIterator &RHS) {
1278     assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1279     assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1280     assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1281            "comparing incomparable iterators!");
1282     return LHS.Ptr == RHS.Ptr;
1283   }
1284 
1285   friend bool operator!=(const DenseMapIterator &LHS,
1286                          const DenseMapIterator &RHS) {
1287     return !(LHS == RHS);
1288   }
1289 
1290   inline DenseMapIterator& operator++() {  // Preincrement
1291     assert(isHandleInSync() && "invalid iterator access!");
1292     assert(Ptr != End && "incrementing end() iterator");
1293     if (shouldReverseIterate<KeyT>()) {
1294       --Ptr;
1295       RetreatPastEmptyBuckets();
1296       return *this;
1297     }
1298     ++Ptr;
1299     AdvancePastEmptyBuckets();
1300     return *this;
1301   }
1302   DenseMapIterator operator++(int) {  // Postincrement
1303     assert(isHandleInSync() && "invalid iterator access!");
1304     DenseMapIterator tmp = *this; ++*this; return tmp;
1305   }
1306 
1307 private:
1308   void AdvancePastEmptyBuckets() {
1309     assert(Ptr <= End);
1310     const KeyT Empty = KeyInfoT::getEmptyKey();
1311     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1312 
1313     while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1314                           KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1315       ++Ptr;
1316   }
1317 
1318   void RetreatPastEmptyBuckets() {
1319     assert(Ptr >= End);
1320     const KeyT Empty = KeyInfoT::getEmptyKey();
1321     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1322 
1323     while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1324                           KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1325       --Ptr;
1326   }
1327 };
1328 
1329 template <typename KeyT, typename ValueT, typename KeyInfoT>
1330 inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1331   return X.getMemorySize();
1332 }
1333 
1334 } // end namespace llvm
1335 
1336 #endif // LLVM_ADT_DENSEMAP_H
1337