1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file defines the DenseMap class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/EpochTracker.h"
19 #include "llvm/Support/AlignOf.h"
20 #include "llvm/Support/Compiler.h"
21 #include "llvm/Support/MathExtras.h"
22 #include "llvm/Support/MemAlloc.h"
23 #include "llvm/Support/ReverseIteration.h"
24 #include "llvm/Support/type_traits.h"
25 #include <algorithm>
26 #include <cassert>
27 #include <cstddef>
28 #include <cstring>
29 #include <initializer_list>
30 #include <iterator>
31 #include <new>
32 #include <type_traits>
33 #include <utility>
34 
35 namespace llvm {
36 
37 namespace detail {
38 
39 // We extend a pair to allow users to override the bucket type with their own
40 // implementation without requiring two members.
41 template <typename KeyT, typename ValueT>
42 struct DenseMapPair : public std::pair<KeyT, ValueT> {
43   using std::pair<KeyT, ValueT>::pair;
44 
45   KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
46   const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
47   ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
48   const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
49 };
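
// Illustrative sketch (hypothetical, not part of this header): a user-supplied
// bucket type can be passed as the Bucket template parameter as long as it
// exposes the same surface as DenseMapPair. The map internals go through
// getFirst()/getSecond(), while operator[] and operator== also access the
// `first`/`second` members directly, so the sketch keeps both:
//
//   template <typename KeyT, typename ValueT> struct MyBucket {
//     KeyT first;
//     ValueT second;
//     KeyT &getFirst() { return first; }
//     const KeyT &getFirst() const { return first; }
//     ValueT &getSecond() { return second; }
//     const ValueT &getSecond() const { return second; }
//   };
//
//   // llvm::DenseMap<int, int, llvm::DenseMapInfo<int>, MyBucket<int, int>> M;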
50 
51 } // end namespace detail
52 
53 template <typename KeyT, typename ValueT,
54           typename KeyInfoT = DenseMapInfo<KeyT>,
55           typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56           bool IsConst = false>
57 class DenseMapIterator;
58 
59 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60           typename BucketT>
61 class DenseMapBase : public DebugEpochBase {
62   template <typename T>
63   using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64 
65 public:
66   using size_type = unsigned;
67   using key_type = KeyT;
68   using mapped_type = ValueT;
69   using value_type = BucketT;
70 
71   using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72   using const_iterator =
73       DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74 
75   inline iterator begin() {
76     // When the map is empty, avoid the overhead of advancing/retreating past
77     // empty buckets.
78     if (empty())
79       return end();
80     if (shouldReverseIterate<KeyT>())
81       return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82     return makeIterator(getBuckets(), getBucketsEnd(), *this);
83   }
84   inline iterator end() {
85     return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86   }
87   inline const_iterator begin() const {
88     if (empty())
89       return end();
90     if (shouldReverseIterate<KeyT>())
91       return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92     return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93   }
94   inline const_iterator end() const {
95     return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96   }
97 
98   LLVM_NODISCARD bool empty() const {
99     return getNumEntries() == 0;
100   }
101   unsigned size() const { return getNumEntries(); }
102 
  /// Grow the DenseMap so that it can contain at least \p NumEntries items
  /// before resizing again.
105   void reserve(size_type NumEntries) {
106     auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
107     incrementEpoch();
108     if (NumBuckets > getNumBuckets())
109       grow(NumBuckets);
110   }
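
  // Example (illustrative): pre-sizing before a bulk insert so no rehashing
  // happens mid-loop; `Inputs` is a hypothetical container of key/value pairs.
  //
  //   llvm::DenseMap<unsigned, int> M;
  //   M.reserve(Inputs.size());   // allocate once, up front
  //   for (const auto &KV : Inputs)
  //     M.insert(KV);             // no grow() while inserting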
111 
112   void clear() {
113     incrementEpoch();
114     if (getNumEntries() == 0 && getNumTombstones() == 0) return;
115 
116     // If the capacity of the array is huge, and the # elements used is small,
117     // shrink the array.
118     if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
119       shrink_and_clear();
120       return;
121     }
122 
123     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
124     if (std::is_trivially_destructible<ValueT>::value) {
125       // Use a simpler loop when values don't need destruction.
126       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
127         P->getFirst() = EmptyKey;
128     } else {
129       unsigned NumEntries = getNumEntries();
130       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
131         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
132           if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
133             P->getSecond().~ValueT();
134             --NumEntries;
135           }
136           P->getFirst() = EmptyKey;
137         }
138       }
139       assert(NumEntries == 0 && "Node count imbalance!");
140       (void)NumEntries;
141     }
142     setNumEntries(0);
143     setNumTombstones(0);
144   }
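
  // Worked example of the shrink heuristic above (illustrative numbers): with
  // 1024 allocated buckets and 100 live entries, 100 * 4 = 400 < 1024 and
  // 1024 > 64, so clear() calls shrink_and_clear() and releases the big table
  // rather than just resetting every key to the empty key.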
145 
146   /// Return 1 if the specified key is in the map, 0 otherwise.
147   size_type count(const_arg_type_t<KeyT> Val) const {
148     const BucketT *TheBucket;
149     return LookupBucketFor(Val, TheBucket) ? 1 : 0;
150   }
151 
152   iterator find(const_arg_type_t<KeyT> Val) {
153     BucketT *TheBucket;
154     if (LookupBucketFor(Val, TheBucket))
155       return makeIterator(TheBucket,
156                           shouldReverseIterate<KeyT>() ? getBuckets()
157                                                        : getBucketsEnd(),
158                           *this, true);
159     return end();
160   }
161   const_iterator find(const_arg_type_t<KeyT> Val) const {
162     const BucketT *TheBucket;
163     if (LookupBucketFor(Val, TheBucket))
164       return makeConstIterator(TheBucket,
165                                shouldReverseIterate<KeyT>() ? getBuckets()
166                                                             : getBucketsEnd(),
167                                *this, true);
168     return end();
169   }
170 
171   /// Alternate version of find() which allows a different, and possibly
172   /// less expensive, key type.
173   /// The DenseMapInfo is responsible for supplying methods
174   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
175   /// type used.
176   template<class LookupKeyT>
177   iterator find_as(const LookupKeyT &Val) {
178     BucketT *TheBucket;
179     if (LookupBucketFor(Val, TheBucket))
180       return makeIterator(TheBucket,
181                           shouldReverseIterate<KeyT>() ? getBuckets()
182                                                        : getBucketsEnd(),
183                           *this, true);
184     return end();
185   }
186   template<class LookupKeyT>
187   const_iterator find_as(const LookupKeyT &Val) const {
188     const BucketT *TheBucket;
189     if (LookupBucketFor(Val, TheBucket))
190       return makeConstIterator(TheBucket,
191                                shouldReverseIterate<KeyT>() ? getBuckets()
192                                                             : getBucketsEnd(),
193                                *this, true);
194     return end();
195   }
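
  // Illustrative sketch of find_as() with a cheaper lookup type. All names
  // below (CostlyKey, CostlyKeyInfo) are hypothetical; the sketch assumes
  // <string>, llvm/ADT/Hashing.h and llvm/ADT/StringRef.h are available. It
  // shows the extra getHashValue/isEqual overloads the key-info type must
  // provide for the lookup key:
  //
  //   struct CostlyKey { std::string Text; };
  //
  //   struct CostlyKeyInfo {
  //     static CostlyKey getEmptyKey() { return {std::string("\x01<empty>")}; }
  //     static CostlyKey getTombstoneKey() { return {std::string("\x01<tomb>")}; }
  //     static unsigned getHashValue(const CostlyKey &K) {
  //       return static_cast<unsigned>(
  //           llvm::hash_value(llvm::StringRef(K.Text)));
  //     }
  //     static bool isEqual(const CostlyKey &L, const CostlyKey &R) {
  //       return L.Text == R.Text;
  //     }
  //     // Overloads for the cheap lookup type used with find_as().
  //     static unsigned getHashValue(llvm::StringRef S) {
  //       return static_cast<unsigned>(llvm::hash_value(S));
  //     }
  //     static bool isEqual(llvm::StringRef S, const CostlyKey &K) {
  //       return S.equals(K.Text);
  //     }
  //   };
  //
  //   llvm::DenseMap<CostlyKey, int, CostlyKeyInfo> M;
  //   auto It = M.find_as(llvm::StringRef("name")); // no CostlyKey temporary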
196 
197   /// lookup - Return the entry for the specified key, or a default
198   /// constructed value if no such entry exists.
199   ValueT lookup(const_arg_type_t<KeyT> Val) const {
200     const BucketT *TheBucket;
201     if (LookupBucketFor(Val, TheBucket))
202       return TheBucket->getSecond();
203     return ValueT();
204   }
205 
206   // Inserts key,value pair into the map if the key isn't already in the map.
207   // If the key is already in the map, it returns false and doesn't update the
208   // value.
209   std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
210     return try_emplace(KV.first, KV.second);
211   }
212 
213   // Inserts key,value pair into the map if the key isn't already in the map.
214   // If the key is already in the map, it returns false and doesn't update the
215   // value.
216   std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
217     return try_emplace(std::move(KV.first), std::move(KV.second));
218   }
219 
220   // Inserts key,value pair into the map if the key isn't already in the map.
221   // The value is constructed in-place if the key is not in the map, otherwise
222   // it is not moved.
223   template <typename... Ts>
224   std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
225     BucketT *TheBucket;
226     if (LookupBucketFor(Key, TheBucket))
227       return std::make_pair(makeIterator(TheBucket,
228                                          shouldReverseIterate<KeyT>()
229                                              ? getBuckets()
230                                              : getBucketsEnd(),
231                                          *this, true),
232                             false); // Already in map.
233 
234     // Otherwise, insert the new element.
235     TheBucket =
236         InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
237     return std::make_pair(makeIterator(TheBucket,
238                                        shouldReverseIterate<KeyT>()
239                                            ? getBuckets()
240                                            : getBucketsEnd(),
241                                        *this, true),
242                           true);
243   }
244 
245   // Inserts key,value pair into the map if the key isn't already in the map.
246   // The value is constructed in-place if the key is not in the map, otherwise
247   // it is not moved.
248   template <typename... Ts>
249   std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
250     BucketT *TheBucket;
251     if (LookupBucketFor(Key, TheBucket))
252       return std::make_pair(makeIterator(TheBucket,
253                                          shouldReverseIterate<KeyT>()
254                                              ? getBuckets()
255                                              : getBucketsEnd(),
256                                          *this, true),
257                             false); // Already in map.
258 
259     // Otherwise, insert the new element.
260     TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
261     return std::make_pair(makeIterator(TheBucket,
262                                        shouldReverseIterate<KeyT>()
263                                            ? getBuckets()
264                                            : getBucketsEnd(),
265                                        *this, true),
266                           true);
267   }
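
  // Example (illustrative, assumes <string>): try_emplace() constructs the
  // value in place only when the key is new and reports whether an insertion
  // happened; an existing mapping is left untouched.
  //
  //   llvm::DenseMap<int, std::string> M;
  //   auto R1 = M.try_emplace(1, "one"); // R1.second == true, built in place
  //   auto R2 = M.try_emplace(1, "uno"); // R2.second == false, M[1] stays "one"
  //   M.insert({2, std::string("two")}); // insert() forwards to try_emplace()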
268 
269   /// Alternate version of insert() which allows a different, and possibly
270   /// less expensive, key type.
271   /// The DenseMapInfo is responsible for supplying methods
272   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
273   /// type used.
274   template <typename LookupKeyT>
275   std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
276                                       const LookupKeyT &Val) {
277     BucketT *TheBucket;
278     if (LookupBucketFor(Val, TheBucket))
279       return std::make_pair(makeIterator(TheBucket,
280                                          shouldReverseIterate<KeyT>()
281                                              ? getBuckets()
282                                              : getBucketsEnd(),
283                                          *this, true),
284                             false); // Already in map.
285 
286     // Otherwise, insert the new element.
287     TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
288                                            std::move(KV.second), Val);
289     return std::make_pair(makeIterator(TheBucket,
290                                        shouldReverseIterate<KeyT>()
291                                            ? getBuckets()
292                                            : getBucketsEnd(),
293                                        *this, true),
294                           true);
295   }
296 
297   /// insert - Range insertion of pairs.
298   template<typename InputIt>
299   void insert(InputIt I, InputIt E) {
300     for (; I != E; ++I)
301       insert(*I);
302   }
303 
304   bool erase(const KeyT &Val) {
305     BucketT *TheBucket;
306     if (!LookupBucketFor(Val, TheBucket))
307       return false; // not in map.
308 
309     TheBucket->getSecond().~ValueT();
310     TheBucket->getFirst() = getTombstoneKey();
311     decrementNumEntries();
312     incrementNumTombstones();
313     return true;
314   }
315   void erase(iterator I) {
316     BucketT *TheBucket = &*I;
317     TheBucket->getSecond().~ValueT();
318     TheBucket->getFirst() = getTombstoneKey();
319     decrementNumEntries();
320     incrementNumTombstones();
321   }
322 
323   value_type& FindAndConstruct(const KeyT &Key) {
324     BucketT *TheBucket;
325     if (LookupBucketFor(Key, TheBucket))
326       return *TheBucket;
327 
328     return *InsertIntoBucket(TheBucket, Key);
329   }
330 
331   ValueT &operator[](const KeyT &Key) {
332     return FindAndConstruct(Key).second;
333   }
334 
335   value_type& FindAndConstruct(KeyT &&Key) {
336     BucketT *TheBucket;
337     if (LookupBucketFor(Key, TheBucket))
338       return *TheBucket;
339 
340     return *InsertIntoBucket(TheBucket, std::move(Key));
341   }
342 
343   ValueT &operator[](KeyT &&Key) {
344     return FindAndConstruct(std::move(Key)).second;
345   }
346 
347   /// isPointerIntoBucketsArray - Return true if the specified pointer points
348   /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
349   /// value in the DenseMap).
350   bool isPointerIntoBucketsArray(const void *Ptr) const {
351     return Ptr >= getBuckets() && Ptr < getBucketsEnd();
352   }
353 
354   /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
355   /// array.  In conjunction with the previous method, this can be used to
356   /// determine whether an insertion caused the DenseMap to reallocate.
357   const void *getPointerIntoBucketsArray() const { return getBuckets(); }
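
  // Example (illustrative) of combining the two methods above to detect
  // whether an insertion reallocated the bucket array (and thus invalidated
  // iterators and pointers into the map):
  //
  //   llvm::DenseMap<int, int> M;
  //   M.try_emplace(1, 1);
  //   const void *OldArray = M.getPointerIntoBucketsArray();
  //   M.try_emplace(42, 7);
  //   bool Reallocated = !M.isPointerIntoBucketsArray(OldArray);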
358 
359 protected:
360   DenseMapBase() = default;
361 
362   void destroyAll() {
363     if (getNumBuckets() == 0) // Nothing to do.
364       return;
365 
366     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
367     for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
368       if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
369           !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
370         P->getSecond().~ValueT();
371       P->getFirst().~KeyT();
372     }
373   }
374 
375   void initEmpty() {
376     setNumEntries(0);
377     setNumTombstones(0);
378 
379     assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
380            "# initial buckets must be a power of two!");
381     const KeyT EmptyKey = getEmptyKey();
382     for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
383       ::new (&B->getFirst()) KeyT(EmptyKey);
384   }
385 
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example if NumEntries is 48, we need to return 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }
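
  // Worked example (illustrative): NumEntries = 48 gives 48 * 4 / 3 + 1 = 65
  // and NextPowerOf2(65) = 128; the invariant holds because 48 * 4 = 192 <
  // 128 * 3 = 384, whereas 64 buckets would already violate it (192 < 192 is
  // false).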
396 
397   void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
398     initEmpty();
399 
400     // Insert all the old elements.
401     const KeyT EmptyKey = getEmptyKey();
402     const KeyT TombstoneKey = getTombstoneKey();
403     for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
404       if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
405           !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
406         // Insert the key/value into the new table.
407         BucketT *DestBucket;
408         bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
409         (void)FoundVal; // silence warning.
410         assert(!FoundVal && "Key already in new map?");
411         DestBucket->getFirst() = std::move(B->getFirst());
412         ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
413         incrementNumEntries();
414 
415         // Free the value.
416         B->getSecond().~ValueT();
417       }
418       B->getFirst().~KeyT();
419     }
420   }
421 
422   template <typename OtherBaseT>
423   void copyFrom(
424       const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
425     assert(&other != this);
426     assert(getNumBuckets() == other.getNumBuckets());
427 
428     setNumEntries(other.getNumEntries());
429     setNumTombstones(other.getNumTombstones());
430 
431     if (std::is_trivially_copyable<KeyT>::value &&
432         std::is_trivially_copyable<ValueT>::value)
433       memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
434              getNumBuckets() * sizeof(BucketT));
435     else
436       for (size_t i = 0; i < getNumBuckets(); ++i) {
437         ::new (&getBuckets()[i].getFirst())
438             KeyT(other.getBuckets()[i].getFirst());
439         if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
440             !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
441           ::new (&getBuckets()[i].getSecond())
442               ValueT(other.getBuckets()[i].getSecond());
443       }
444   }
445 
446   static unsigned getHashValue(const KeyT &Val) {
447     return KeyInfoT::getHashValue(Val);
448   }
449 
450   template<typename LookupKeyT>
451   static unsigned getHashValue(const LookupKeyT &Val) {
452     return KeyInfoT::getHashValue(Val);
453   }
454 
455   static const KeyT getEmptyKey() {
456     static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
457                   "Must pass the derived type to this template!");
458     return KeyInfoT::getEmptyKey();
459   }
460 
461   static const KeyT getTombstoneKey() {
462     return KeyInfoT::getTombstoneKey();
463   }
464 
465 private:
466   iterator makeIterator(BucketT *P, BucketT *E,
467                         DebugEpochBase &Epoch,
468                         bool NoAdvance=false) {
469     if (shouldReverseIterate<KeyT>()) {
470       BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
471       return iterator(B, E, Epoch, NoAdvance);
472     }
473     return iterator(P, E, Epoch, NoAdvance);
474   }
475 
476   const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
477                                    const DebugEpochBase &Epoch,
478                                    const bool NoAdvance=false) const {
479     if (shouldReverseIterate<KeyT>()) {
480       const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
481       return const_iterator(B, E, Epoch, NoAdvance);
482     }
483     return const_iterator(P, E, Epoch, NoAdvance);
484   }
485 
486   unsigned getNumEntries() const {
487     return static_cast<const DerivedT *>(this)->getNumEntries();
488   }
489 
490   void setNumEntries(unsigned Num) {
491     static_cast<DerivedT *>(this)->setNumEntries(Num);
492   }
493 
494   void incrementNumEntries() {
495     setNumEntries(getNumEntries() + 1);
496   }
497 
498   void decrementNumEntries() {
499     setNumEntries(getNumEntries() - 1);
500   }
501 
502   unsigned getNumTombstones() const {
503     return static_cast<const DerivedT *>(this)->getNumTombstones();
504   }
505 
506   void setNumTombstones(unsigned Num) {
507     static_cast<DerivedT *>(this)->setNumTombstones(Num);
508   }
509 
510   void incrementNumTombstones() {
511     setNumTombstones(getNumTombstones() + 1);
512   }
513 
514   void decrementNumTombstones() {
515     setNumTombstones(getNumTombstones() - 1);
516   }
517 
518   const BucketT *getBuckets() const {
519     return static_cast<const DerivedT *>(this)->getBuckets();
520   }
521 
522   BucketT *getBuckets() {
523     return static_cast<DerivedT *>(this)->getBuckets();
524   }
525 
526   unsigned getNumBuckets() const {
527     return static_cast<const DerivedT *>(this)->getNumBuckets();
528   }
529 
530   BucketT *getBucketsEnd() {
531     return getBuckets() + getNumBuckets();
532   }
533 
534   const BucketT *getBucketsEnd() const {
535     return getBuckets() + getNumBuckets();
536   }
537 
538   void grow(unsigned AtLeast) {
539     static_cast<DerivedT *>(this)->grow(AtLeast);
540   }
541 
542   void shrink_and_clear() {
543     static_cast<DerivedT *>(this)->shrink_and_clear();
544   }
545 
546   template <typename KeyArg, typename... ValueArgs>
547   BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
548                             ValueArgs &&... Values) {
549     TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
550 
551     TheBucket->getFirst() = std::forward<KeyArg>(Key);
552     ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
553     return TheBucket;
554   }
555 
556   template <typename LookupKeyT>
557   BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
558                                       ValueT &&Value, LookupKeyT &Lookup) {
559     TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
560 
561     TheBucket->getFirst() = std::move(Key);
562     ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
563     return TheBucket;
564   }
565 
566   template <typename LookupKeyT>
567   BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
568                                 BucketT *TheBucket) {
569     incrementEpoch();
570 
571     // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
572     // the buckets are empty (meaning that many are filled with tombstones),
573     // grow the table.
574     //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until they found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
580     unsigned NewNumEntries = getNumEntries() + 1;
581     unsigned NumBuckets = getNumBuckets();
582     if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
583       this->grow(NumBuckets * 2);
584       LookupBucketFor(Lookup, TheBucket);
585       NumBuckets = getNumBuckets();
586     } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
587                              NumBuckets/8)) {
588       this->grow(NumBuckets);
589       LookupBucketFor(Lookup, TheBucket);
590     }
591     assert(TheBucket);
592 
593     // Only update the state after we've grown our bucket space appropriately
594     // so that when growing buckets we have self-consistent entry count.
595     incrementNumEntries();
596 
597     // If we are writing over a tombstone, remember this.
598     const KeyT EmptyKey = getEmptyKey();
599     if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
600       decrementNumTombstones();
601 
602     return TheBucket;
603   }
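
  // Worked example of the growth policy above (illustrative numbers): with
  // NumBuckets = 64, the table grows to 128 buckets when the 48th entry is
  // inserted (48 * 4 = 192 >= 64 * 3 = 192), and it is rehashed at the same
  // size when accumulated tombstones leave no more than 64 / 8 = 8 buckets
  // empty.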
604 
  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket holds the key (and its value), this returns
  /// true.  Otherwise FoundBucket is set to the bucket the key should be
  /// inserted into (an empty bucket or the first tombstone encountered) and
  /// this returns false.
609   template<typename LookupKeyT>
610   bool LookupBucketFor(const LookupKeyT &Val,
611                        const BucketT *&FoundBucket) const {
612     const BucketT *BucketsPtr = getBuckets();
613     const unsigned NumBuckets = getNumBuckets();
614 
615     if (NumBuckets == 0) {
616       FoundBucket = nullptr;
617       return false;
618     }
619 
620     // FoundTombstone - Keep track of whether we find a tombstone while probing.
621     const BucketT *FoundTombstone = nullptr;
622     const KeyT EmptyKey = getEmptyKey();
623     const KeyT TombstoneKey = getTombstoneKey();
624     assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
625            !KeyInfoT::isEqual(Val, TombstoneKey) &&
626            "Empty/Tombstone value shouldn't be inserted into map!");
627 
628     unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
629     unsigned ProbeAmt = 1;
630     while (true) {
631       const BucketT *ThisBucket = BucketsPtr + BucketNo;
632       // Found Val's bucket?  If so, return it.
633       if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
634         FoundBucket = ThisBucket;
635         return true;
636       }
637 
      // If we found an empty bucket, the key doesn't exist in the map.
      // Return the bucket the caller should insert into and report failure.
640       if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
641         // If we've already seen a tombstone while probing, fill it in instead
642         // of the empty bucket we eventually probed to.
643         FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
644         return false;
645       }
646 
      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it rather than something that would require more
      // probing.
649       if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
650           !FoundTombstone)
651         FoundTombstone = ThisBucket;  // Remember the first tombstone found.
652 
653       // Otherwise, it's a hash collision or a tombstone, continue quadratic
654       // probing.
655       BucketNo += ProbeAmt++;
656       BucketNo &= (NumBuckets-1);
657     }
658   }
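
  // Probe-sequence sketch for the loop above (illustrative): with ProbeAmt
  // starting at 1, a key hashing to bucket H visits H, H+1, H+3, H+6, H+10,
  // and so on (the offsets are the triangular numbers), each taken modulo the
  // power-of-two NumBuckets, which guarantees every bucket is eventually
  // visited.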
659 
660   template <typename LookupKeyT>
661   bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
662     const BucketT *ConstFoundBucket;
663     bool Result = const_cast<const DenseMapBase *>(this)
664       ->LookupBucketFor(Val, ConstFoundBucket);
665     FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
666     return Result;
667   }
668 
669 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
674   size_t getMemorySize() const {
675     return getNumBuckets() * sizeof(BucketT);
676   }
677 };
678 
679 /// Equality comparison for DenseMap.
680 ///
681 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
682 /// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Expected
/// complexity is linear, worst case is O(N^2) (if every hash collides).
685 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
686           typename BucketT>
687 bool operator==(
688     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
689     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
690   if (LHS.size() != RHS.size())
691     return false;
692 
693   for (auto &KV : LHS) {
694     auto I = RHS.find(KV.first);
695     if (I == RHS.end() || I->second != KV.second)
696       return false;
697   }
698 
699   return true;
700 }
701 
702 /// Inequality comparison for DenseMap.
703 ///
704 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
705 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
706           typename BucketT>
707 bool operator!=(
708     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
709     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
710   return !(LHS == RHS);
711 }
712 
713 template <typename KeyT, typename ValueT,
714           typename KeyInfoT = DenseMapInfo<KeyT>,
715           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
716 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
717                                      KeyT, ValueT, KeyInfoT, BucketT> {
718   friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
719 
720   // Lift some types from the dependent base class into this class for
721   // simplicity of referring to them.
722   using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
723 
724   BucketT *Buckets;
725   unsigned NumEntries;
726   unsigned NumTombstones;
727   unsigned NumBuckets;
728 
729 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without needing to
  /// grow().
732   explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
733 
734   DenseMap(const DenseMap &other) : BaseT() {
735     init(0);
736     copyFrom(other);
737   }
738 
739   DenseMap(DenseMap &&other) : BaseT() {
740     init(0);
741     swap(other);
742   }
743 
744   template<typename InputIt>
745   DenseMap(const InputIt &I, const InputIt &E) {
746     init(std::distance(I, E));
747     this->insert(I, E);
748   }
749 
750   DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
751     init(Vals.size());
752     this->insert(Vals.begin(), Vals.end());
753   }
754 
755   ~DenseMap() {
756     this->destroyAll();
757     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
758   }
759 
760   void swap(DenseMap& RHS) {
761     this->incrementEpoch();
762     RHS.incrementEpoch();
763     std::swap(Buckets, RHS.Buckets);
764     std::swap(NumEntries, RHS.NumEntries);
765     std::swap(NumTombstones, RHS.NumTombstones);
766     std::swap(NumBuckets, RHS.NumBuckets);
767   }
768 
769   DenseMap& operator=(const DenseMap& other) {
770     if (&other != this)
771       copyFrom(other);
772     return *this;
773   }
774 
775   DenseMap& operator=(DenseMap &&other) {
776     this->destroyAll();
777     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
778     init(0);
779     swap(other);
780     return *this;
781   }
782 
783   void copyFrom(const DenseMap& other) {
784     this->destroyAll();
785     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
786     if (allocateBuckets(other.NumBuckets)) {
787       this->BaseT::copyFrom(other);
788     } else {
789       NumEntries = 0;
790       NumTombstones = 0;
791     }
792   }
793 
794   void init(unsigned InitNumEntries) {
795     auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
796     if (allocateBuckets(InitBuckets)) {
797       this->BaseT::initEmpty();
798     } else {
799       NumEntries = 0;
800       NumTombstones = 0;
801     }
802   }
803 
804   void grow(unsigned AtLeast) {
805     unsigned OldNumBuckets = NumBuckets;
806     BucketT *OldBuckets = Buckets;
807 
808     allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
809     assert(Buckets);
810     if (!OldBuckets) {
811       this->BaseT::initEmpty();
812       return;
813     }
814 
815     this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
816 
817     // Free the old table.
818     deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
819                       alignof(BucketT));
820   }
821 
822   void shrink_and_clear() {
823     unsigned OldNumBuckets = NumBuckets;
824     unsigned OldNumEntries = NumEntries;
825     this->destroyAll();
826 
827     // Reduce the number of buckets.
828     unsigned NewNumBuckets = 0;
829     if (OldNumEntries)
830       NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
831     if (NewNumBuckets == NumBuckets) {
832       this->BaseT::initEmpty();
833       return;
834     }
835 
836     deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
837                       alignof(BucketT));
838     init(NewNumBuckets);
839   }
840 
841 private:
842   unsigned getNumEntries() const {
843     return NumEntries;
844   }
845 
846   void setNumEntries(unsigned Num) {
847     NumEntries = Num;
848   }
849 
850   unsigned getNumTombstones() const {
851     return NumTombstones;
852   }
853 
854   void setNumTombstones(unsigned Num) {
855     NumTombstones = Num;
856   }
857 
858   BucketT *getBuckets() const {
859     return Buckets;
860   }
861 
862   unsigned getNumBuckets() const {
863     return NumBuckets;
864   }
865 
866   bool allocateBuckets(unsigned Num) {
867     NumBuckets = Num;
868     if (NumBuckets == 0) {
869       Buckets = nullptr;
870       return false;
871     }
872 
873     Buckets = static_cast<BucketT *>(
874         allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
875     return true;
876   }
877 };
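
// Minimal usage sketch for DenseMap (illustrative; the function name and data
// are made up):
//
//   #include "llvm/ADT/DenseMap.h"
//
//   unsigned countUses() {
//     llvm::DenseMap<unsigned, unsigned> UseCounts;
//     UseCounts.reserve(8);                // optional pre-size
//     for (unsigned ID : {1u, 2u, 1u, 3u, 1u})
//       ++UseCounts[ID];                   // operator[] default-constructs to 0
//     UseCounts.erase(2u);                 // leaves a tombstone behind
//     unsigned Total = 0;
//     for (const auto &KV : UseCounts)     // iteration order is unspecified
//       Total += KV.second;
//     return Total;                        // 3 + 1 = 4
//   }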
878 
879 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
880           typename KeyInfoT = DenseMapInfo<KeyT>,
881           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
882 class SmallDenseMap
883     : public DenseMapBase<
884           SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
885           ValueT, KeyInfoT, BucketT> {
886   friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
887 
888   // Lift some types from the dependent base class into this class for
889   // simplicity of referring to them.
890   using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
891 
892   static_assert(isPowerOf2_64(InlineBuckets),
893                 "InlineBuckets must be a power of 2.");
894 
895   unsigned Small : 1;
896   unsigned NumEntries : 31;
897   unsigned NumTombstones;
898 
899   struct LargeRep {
900     BucketT *Buckets;
901     unsigned NumBuckets;
902   };
903 
  /// A "union" of an inline bucket array and the struct representing the
  /// heap-allocated bucket array. This union is discriminated by the 'Small'
  /// bit.
906   AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
907 
908 public:
909   explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
910     if (NumInitBuckets > InlineBuckets)
911       NumInitBuckets = NextPowerOf2(NumInitBuckets - 1);
912     init(NumInitBuckets);
913   }
914 
915   SmallDenseMap(const SmallDenseMap &other) : BaseT() {
916     init(0);
917     copyFrom(other);
918   }
919 
920   SmallDenseMap(SmallDenseMap &&other) : BaseT() {
921     init(0);
922     swap(other);
923   }
924 
925   template<typename InputIt>
926   SmallDenseMap(const InputIt &I, const InputIt &E) {
927     init(NextPowerOf2(std::distance(I, E)));
928     this->insert(I, E);
929   }
930 
931   SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
932       : SmallDenseMap(Vals.begin(), Vals.end()) {}
933 
934   ~SmallDenseMap() {
935     this->destroyAll();
936     deallocateBuckets();
937   }
938 
939   void swap(SmallDenseMap& RHS) {
940     unsigned TmpNumEntries = RHS.NumEntries;
941     RHS.NumEntries = NumEntries;
942     NumEntries = TmpNumEntries;
943     std::swap(NumTombstones, RHS.NumTombstones);
944 
945     const KeyT EmptyKey = this->getEmptyKey();
946     const KeyT TombstoneKey = this->getTombstoneKey();
947     if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: every key slot is
      // initialized, but values exist only for real entries. Thus we swap
      // every key, but a value may need to be moved in only one direction.
952       for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
953         BucketT *LHSB = &getInlineBuckets()[i],
954                 *RHSB = &RHS.getInlineBuckets()[i];
955         bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
956                             !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
957         bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
958                             !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
959         if (hasLHSValue && hasRHSValue) {
960           // Swap together if we can...
961           std::swap(*LHSB, *RHSB);
962           continue;
963         }
964         // Swap separately and handle any asymmetry.
965         std::swap(LHSB->getFirst(), RHSB->getFirst());
966         if (hasLHSValue) {
967           ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
968           LHSB->getSecond().~ValueT();
969         } else if (hasRHSValue) {
970           ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
971           RHSB->getSecond().~ValueT();
972         }
973       }
974       return;
975     }
976     if (!Small && !RHS.Small) {
977       std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
978       std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
979       return;
980     }
981 
982     SmallDenseMap &SmallSide = Small ? *this : RHS;
983     SmallDenseMap &LargeSide = Small ? RHS : *this;
984 
985     // First stash the large side's rep and move the small side across.
986     LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
987     LargeSide.getLargeRep()->~LargeRep();
988     LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually changed in this case. So we have to carefully
    // move-construct the keys and values into their new locations, but there
    // is no need to re-hash things.
993     for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
994       BucketT *NewB = &LargeSide.getInlineBuckets()[i],
995               *OldB = &SmallSide.getInlineBuckets()[i];
996       ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
997       OldB->getFirst().~KeyT();
998       if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
999           !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1000         ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1001         OldB->getSecond().~ValueT();
1002       }
1003     }
1004 
1005     // The hard part of moving the small buckets across is done, just move
1006     // the TmpRep into its new home.
1007     SmallSide.Small = false;
1008     new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1009   }
1010 
1011   SmallDenseMap& operator=(const SmallDenseMap& other) {
1012     if (&other != this)
1013       copyFrom(other);
1014     return *this;
1015   }
1016 
1017   SmallDenseMap& operator=(SmallDenseMap &&other) {
1018     this->destroyAll();
1019     deallocateBuckets();
1020     init(0);
1021     swap(other);
1022     return *this;
1023   }
1024 
1025   void copyFrom(const SmallDenseMap& other) {
1026     this->destroyAll();
1027     deallocateBuckets();
1028     Small = true;
1029     if (other.getNumBuckets() > InlineBuckets) {
1030       Small = false;
1031       new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1032     }
1033     this->BaseT::copyFrom(other);
1034   }
1035 
1036   void init(unsigned InitBuckets) {
1037     Small = true;
1038     if (InitBuckets > InlineBuckets) {
1039       Small = false;
1040       new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1041     }
1042     this->BaseT::initEmpty();
1043   }
1044 
1045   void grow(unsigned AtLeast) {
1046     if (AtLeast > InlineBuckets)
1047       AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1048 
1049     if (Small) {
1050       // First move the inline buckets into a temporary storage.
1051       AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1052       BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1053       BucketT *TmpEnd = TmpBegin;
1054 
      // Loop over the buckets, moving non-empty, non-tombstone entries into
      // the temporary storage, advancing TmpEnd as we go.
1057       const KeyT EmptyKey = this->getEmptyKey();
1058       const KeyT TombstoneKey = this->getTombstoneKey();
1059       for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1060         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1061             !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1062           assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1063                  "Too many inline buckets!");
1064           ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1065           ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1066           ++TmpEnd;
1067           P->getSecond().~ValueT();
1068         }
1069         P->getFirst().~KeyT();
1070       }
1071 
      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. In that case we stay in the small
      // rep and rebuild the inline buckets in place; otherwise we switch to
      // the large rep here.
1075       if (AtLeast > InlineBuckets) {
1076         Small = false;
1077         new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1078       }
1079       this->moveFromOldBuckets(TmpBegin, TmpEnd);
1080       return;
1081     }
1082 
1083     LargeRep OldRep = std::move(*getLargeRep());
1084     getLargeRep()->~LargeRep();
1085     if (AtLeast <= InlineBuckets) {
1086       Small = true;
1087     } else {
1088       new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1089     }
1090 
1091     this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1092 
1093     // Free the old table.
1094     deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1095                       alignof(BucketT));
1096   }
1097 
1098   void shrink_and_clear() {
1099     unsigned OldSize = this->size();
1100     this->destroyAll();
1101 
1102     // Reduce the number of buckets.
1103     unsigned NewNumBuckets = 0;
1104     if (OldSize) {
1105       NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1106       if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1107         NewNumBuckets = 64;
1108     }
1109     if ((Small && NewNumBuckets <= InlineBuckets) ||
1110         (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1111       this->BaseT::initEmpty();
1112       return;
1113     }
1114 
1115     deallocateBuckets();
1116     init(NewNumBuckets);
1117   }
1118 
1119 private:
1120   unsigned getNumEntries() const {
1121     return NumEntries;
1122   }
1123 
1124   void setNumEntries(unsigned Num) {
1125     // NumEntries is hardcoded to be 31 bits wide.
1126     assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1127     NumEntries = Num;
1128   }
1129 
1130   unsigned getNumTombstones() const {
1131     return NumTombstones;
1132   }
1133 
1134   void setNumTombstones(unsigned Num) {
1135     NumTombstones = Num;
1136   }
1137 
1138   const BucketT *getInlineBuckets() const {
1139     assert(Small);
1140     // Note that this cast does not violate aliasing rules as we assert that
1141     // the memory's dynamic type is the small, inline bucket buffer, and the
1142     // 'storage' is a POD containing a char buffer.
1143     return reinterpret_cast<const BucketT *>(&storage);
1144   }
1145 
1146   BucketT *getInlineBuckets() {
1147     return const_cast<BucketT *>(
1148       const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1149   }
1150 
1151   const LargeRep *getLargeRep() const {
1152     assert(!Small);
1153     // Note, same rule about aliasing as with getInlineBuckets.
1154     return reinterpret_cast<const LargeRep *>(&storage);
1155   }
1156 
1157   LargeRep *getLargeRep() {
1158     return const_cast<LargeRep *>(
1159       const_cast<const SmallDenseMap *>(this)->getLargeRep());
1160   }
1161 
1162   const BucketT *getBuckets() const {
1163     return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1164   }
1165 
1166   BucketT *getBuckets() {
1167     return const_cast<BucketT *>(
1168       const_cast<const SmallDenseMap *>(this)->getBuckets());
1169   }
1170 
1171   unsigned getNumBuckets() const {
1172     return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1173   }
1174 
1175   void deallocateBuckets() {
1176     if (Small)
1177       return;
1178 
1179     deallocate_buffer(getLargeRep()->Buckets,
1180                       sizeof(BucketT) * getLargeRep()->NumBuckets,
1181                       alignof(BucketT));
1182     getLargeRep()->~LargeRep();
1183   }
1184 
1185   LargeRep allocateBuckets(unsigned Num) {
1186     assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1187     LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1188                         sizeof(BucketT) * Num, alignof(BucketT))),
1189                     Num};
1190     return Rep;
1191   }
1192 };
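
// Minimal usage sketch for SmallDenseMap (illustrative): the map keeps up to
// InlineBuckets buckets inside the object itself and only touches the heap
// once it has to grow past them (with 4 inline buckets, growth triggers on
// the third insertion, when the 3/4 load factor is exceeded).
//
//   llvm::SmallDenseMap<unsigned, bool, 4> Flags;
//   Flags[1] = true;   // stays in the inline buckets
//   Flags[7] = false;  // still inline; a third entry would spill to the heap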
1193 
1194 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1195           bool IsConst>
1196 class DenseMapIterator : DebugEpochBase::HandleBase {
1197   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1198   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1199 
1200 public:
1201   using difference_type = ptrdiff_t;
1202   using value_type =
1203       typename std::conditional<IsConst, const Bucket, Bucket>::type;
1204   using pointer = value_type *;
1205   using reference = value_type &;
1206   using iterator_category = std::forward_iterator_tag;
1207 
1208 private:
1209   pointer Ptr = nullptr;
1210   pointer End = nullptr;
1211 
1212 public:
1213   DenseMapIterator() = default;
1214 
1215   DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1216                    bool NoAdvance = false)
1217       : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1218     assert(isHandleInSync() && "invalid construction!");
1219 
1220     if (NoAdvance) return;
1221     if (shouldReverseIterate<KeyT>()) {
1222       RetreatPastEmptyBuckets();
1223       return;
1224     }
1225     AdvancePastEmptyBuckets();
1226   }
1227 
  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // so that it only participates when converting a non-const iterator to a
  // const iterator, and never ends up acting as a user-defined copy
  // constructor.
1231   template <bool IsConstSrc,
1232             typename = std::enable_if_t<!IsConstSrc && IsConst>>
1233   DenseMapIterator(
1234       const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1235       : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1236 
1237   reference operator*() const {
1238     assert(isHandleInSync() && "invalid iterator access!");
1239     assert(Ptr != End && "dereferencing end() iterator");
1240     if (shouldReverseIterate<KeyT>())
1241       return Ptr[-1];
1242     return *Ptr;
1243   }
1244   pointer operator->() const {
1245     assert(isHandleInSync() && "invalid iterator access!");
1246     assert(Ptr != End && "dereferencing end() iterator");
1247     if (shouldReverseIterate<KeyT>())
1248       return &(Ptr[-1]);
1249     return Ptr;
1250   }
1251 
1252   friend bool operator==(const DenseMapIterator &LHS,
1253                          const DenseMapIterator &RHS) {
1254     assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1255     assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1256     assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1257            "comparing incomparable iterators!");
1258     return LHS.Ptr == RHS.Ptr;
1259   }
1260 
1261   friend bool operator!=(const DenseMapIterator &LHS,
1262                          const DenseMapIterator &RHS) {
1263     return !(LHS == RHS);
1264   }
1265 
1266   inline DenseMapIterator& operator++() {  // Preincrement
1267     assert(isHandleInSync() && "invalid iterator access!");
1268     assert(Ptr != End && "incrementing end() iterator");
1269     if (shouldReverseIterate<KeyT>()) {
1270       --Ptr;
1271       RetreatPastEmptyBuckets();
1272       return *this;
1273     }
1274     ++Ptr;
1275     AdvancePastEmptyBuckets();
1276     return *this;
1277   }
1278   DenseMapIterator operator++(int) {  // Postincrement
1279     assert(isHandleInSync() && "invalid iterator access!");
1280     DenseMapIterator tmp = *this; ++*this; return tmp;
1281   }
1282 
1283 private:
1284   void AdvancePastEmptyBuckets() {
1285     assert(Ptr <= End);
1286     const KeyT Empty = KeyInfoT::getEmptyKey();
1287     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1288 
1289     while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1290                           KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1291       ++Ptr;
1292   }
1293 
1294   void RetreatPastEmptyBuckets() {
1295     assert(Ptr >= End);
1296     const KeyT Empty = KeyInfoT::getEmptyKey();
1297     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1298 
1299     while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1300                           KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1301       --Ptr;
1302   }
1303 };
1304 
1305 template <typename KeyT, typename ValueT, typename KeyInfoT>
1306 inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1307   return X.getMemorySize();
1308 }
1309 
1310 } // end namespace llvm
1311 
1312 #endif // LLVM_ADT_DENSEMAP_H
1313