1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the DenseMap class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_ADT_DENSEMAP_H
14 #define LLVM_ADT_DENSEMAP_H
15 
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/EpochTracker.h"
18 #include "llvm/Support/AlignOf.h"
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/MathExtras.h"
21 #include "llvm/Support/ReverseIteration.h"
22 #include "llvm/Support/type_traits.h"
23 #include <algorithm>
24 #include <cassert>
25 #include <cstddef>
26 #include <cstring>
27 #include <initializer_list>
28 #include <iterator>
29 #include <new>
30 #include <type_traits>
31 #include <utility>
32 
33 namespace llvm {
34 
35 namespace detail {
36 
37 // We extend a pair to allow users to override the bucket type with their own
38 // implementation without requiring two members.
39 template <typename KeyT, typename ValueT>
40 struct DenseMapPair : public std::pair<KeyT, ValueT> {
41   using std::pair<KeyT, ValueT>::pair;
42 
43   KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
44   const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
45   ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
46   const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
47 };
48 
49 } // end namespace detail
50 
51 template <typename KeyT, typename ValueT,
52           typename KeyInfoT = DenseMapInfo<KeyT>,
53           typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
54           bool IsConst = false>
55 class DenseMapIterator;
56 
57 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
58           typename BucketT>
59 class DenseMapBase : public DebugEpochBase {
60   template <typename T>
61   using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
62 
63 public:
64   using size_type = unsigned;
65   using key_type = KeyT;
66   using mapped_type = ValueT;
67   using value_type = BucketT;
68 
69   using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
70   using const_iterator =
71       DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
72 
73   inline iterator begin() {
74     // When the map is empty, avoid the overhead of advancing/retreating past
75     // empty buckets.
76     if (empty())
77       return end();
78     if (shouldReverseIterate<KeyT>())
79       return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
80     return makeIterator(getBuckets(), getBucketsEnd(), *this);
81   }
82   inline iterator end() {
83     return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
84   }
85   inline const_iterator begin() const {
86     if (empty())
87       return end();
88     if (shouldReverseIterate<KeyT>())
89       return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
90     return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
91   }
92   inline const_iterator end() const {
93     return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
94   }
95 
96   LLVM_NODISCARD bool empty() const {
97     return getNumEntries() == 0;
98   }
99   unsigned size() const { return getNumEntries(); }
100 
  /// Grow the DenseMap so that it can contain at least \p NumEntries items
  /// before resizing again.
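  ///
  /// For illustration, a minimal usage sketch (assuming an int -> int map):
  ///
  ///   llvm::DenseMap<int, int> M;
  ///   M.reserve(100); // Inserting up to 100 entries will not trigger grow().
  ///   for (int I = 0; I != 100; ++I)
  ///     M[I] = 2 * I;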
103   void reserve(size_type NumEntries) {
104     auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
105     incrementEpoch();
106     if (NumBuckets > getNumBuckets())
107       grow(NumBuckets);
108   }
109 
110   void clear() {
111     incrementEpoch();
112     if (getNumEntries() == 0 && getNumTombstones() == 0) return;
113 
114     // If the capacity of the array is huge, and the # elements used is small,
115     // shrink the array.
116     if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
117       shrink_and_clear();
118       return;
119     }
120 
121     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
122     if (is_trivially_copyable<KeyT>::value &&
123         is_trivially_copyable<ValueT>::value) {
124       // Use a simpler loop when these are trivial types.
125       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
126         P->getFirst() = EmptyKey;
127     } else {
128       unsigned NumEntries = getNumEntries();
129       for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
130         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
131           if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
132             P->getSecond().~ValueT();
133             --NumEntries;
134           }
135           P->getFirst() = EmptyKey;
136         }
137       }
138       assert(NumEntries == 0 && "Node count imbalance!");
139     }
140     setNumEntries(0);
141     setNumTombstones(0);
142   }
143 
144   /// Return 1 if the specified key is in the map, 0 otherwise.
145   size_type count(const_arg_type_t<KeyT> Val) const {
146     const BucketT *TheBucket;
147     return LookupBucketFor(Val, TheBucket) ? 1 : 0;
148   }
149 
150   iterator find(const_arg_type_t<KeyT> Val) {
151     BucketT *TheBucket;
152     if (LookupBucketFor(Val, TheBucket))
153       return makeIterator(TheBucket, getBucketsEnd(), *this, true);
154     return end();
155   }
156   const_iterator find(const_arg_type_t<KeyT> Val) const {
157     const BucketT *TheBucket;
158     if (LookupBucketFor(Val, TheBucket))
159       return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
160     return end();
161   }
162 
163   /// Alternate version of find() which allows a different, and possibly
164   /// less expensive, key type.
165   /// The DenseMapInfo is responsible for supplying methods
166   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
167   /// type used.
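  ///
  /// For illustration only, a sketch assuming a hypothetical StringKeyInfo
  /// that provides getHashValue(StringRef) and
  /// isEqual(StringRef, const std::string &) in addition to the usual
  /// DenseMapInfo methods for std::string:
  ///
  ///   DenseMap<std::string, int, StringKeyInfo> M;
  ///   auto It = M.find_as(StringRef("key")); // No temporary std::string.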
168   template<class LookupKeyT>
169   iterator find_as(const LookupKeyT &Val) {
170     BucketT *TheBucket;
171     if (LookupBucketFor(Val, TheBucket))
172       return makeIterator(TheBucket, getBucketsEnd(), *this, true);
173     return end();
174   }
175   template<class LookupKeyT>
176   const_iterator find_as(const LookupKeyT &Val) const {
177     const BucketT *TheBucket;
178     if (LookupBucketFor(Val, TheBucket))
179       return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
180     return end();
181   }
182 
  /// lookup - Return the value for the specified key, or a default-constructed
  /// value if no such entry exists.
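  ///
  /// For example (a minimal sketch):
  ///
  ///   llvm::DenseMap<int, int> M;
  ///   M[1] = 10;
  ///   int A = M.lookup(1); // 10
  ///   int B = M.lookup(2); // 0; no entry is inserted for the missing key.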
185   ValueT lookup(const_arg_type_t<KeyT> Val) const {
186     const BucketT *TheBucket;
187     if (LookupBucketFor(Val, TheBucket))
188       return TheBucket->getSecond();
189     return ValueT();
190   }
191 
  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, this returns false and doesn't update
  // the value.
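  //
  // For example (a minimal sketch):
  //
  //   llvm::DenseMap<int, int> M;
  //   auto R1 = M.insert({1, 10}); // R1.second == true: newly inserted.
  //   auto R2 = M.insert({1, 20}); // R2.second == false: M[1] remains 10.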
195   std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
196     return try_emplace(KV.first, KV.second);
197   }
198 
  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, this returns false and doesn't update
  // the value.
202   std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
203     return try_emplace(std::move(KV.first), std::move(KV.second));
204   }
205 
  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place if the key is not in the map; otherwise
  // it is not moved.
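  //
  // For example (a minimal sketch, assuming a std::string value type):
  //
  //   llvm::DenseMap<int, std::string> M;
  //   M.try_emplace(1, 3, 'a'); // Constructs std::string(3, 'a') in place.
  //   M.try_emplace(1, "xyz");  // Key already present; the value stays "aaa".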
209   template <typename... Ts>
210   std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
211     BucketT *TheBucket;
212     if (LookupBucketFor(Key, TheBucket))
213       return std::make_pair(
214                makeIterator(TheBucket, getBucketsEnd(), *this, true),
215                false); // Already in map.
216 
217     // Otherwise, insert the new element.
218     TheBucket =
219         InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
220     return std::make_pair(
221              makeIterator(TheBucket, getBucketsEnd(), *this, true),
222              true);
223   }
224 
  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place if the key is not in the map; otherwise
  // it is not moved.
228   template <typename... Ts>
229   std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
230     BucketT *TheBucket;
231     if (LookupBucketFor(Key, TheBucket))
232       return std::make_pair(
233                makeIterator(TheBucket, getBucketsEnd(), *this, true),
234                false); // Already in map.
235 
236     // Otherwise, insert the new element.
237     TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
238     return std::make_pair(
239              makeIterator(TheBucket, getBucketsEnd(), *this, true),
240              true);
241   }
242 
243   /// Alternate version of insert() which allows a different, and possibly
244   /// less expensive, key type.
245   /// The DenseMapInfo is responsible for supplying methods
246   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
247   /// type used.
248   template <typename LookupKeyT>
249   std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
250                                       const LookupKeyT &Val) {
251     BucketT *TheBucket;
252     if (LookupBucketFor(Val, TheBucket))
253       return std::make_pair(
254                makeIterator(TheBucket, getBucketsEnd(), *this, true),
255                false); // Already in map.
256 
257     // Otherwise, insert the new element.
258     TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
259                                            std::move(KV.second), Val);
260     return std::make_pair(
261              makeIterator(TheBucket, getBucketsEnd(), *this, true),
262              true);
263   }
264 
265   /// insert - Range insertion of pairs.
266   template<typename InputIt>
267   void insert(InputIt I, InputIt E) {
268     for (; I != E; ++I)
269       insert(*I);
270   }
271 
272   bool erase(const KeyT &Val) {
273     BucketT *TheBucket;
274     if (!LookupBucketFor(Val, TheBucket))
275       return false; // not in map.
276 
277     TheBucket->getSecond().~ValueT();
278     TheBucket->getFirst() = getTombstoneKey();
279     decrementNumEntries();
280     incrementNumTombstones();
281     return true;
282   }
283   void erase(iterator I) {
284     BucketT *TheBucket = &*I;
285     TheBucket->getSecond().~ValueT();
286     TheBucket->getFirst() = getTombstoneKey();
287     decrementNumEntries();
288     incrementNumTombstones();
289   }
290 
291   value_type& FindAndConstruct(const KeyT &Key) {
292     BucketT *TheBucket;
293     if (LookupBucketFor(Key, TheBucket))
294       return *TheBucket;
295 
296     return *InsertIntoBucket(TheBucket, Key);
297   }
298 
299   ValueT &operator[](const KeyT &Key) {
300     return FindAndConstruct(Key).second;
301   }
302 
303   value_type& FindAndConstruct(KeyT &&Key) {
304     BucketT *TheBucket;
305     if (LookupBucketFor(Key, TheBucket))
306       return *TheBucket;
307 
308     return *InsertIntoBucket(TheBucket, std::move(Key));
309   }
310 
311   ValueT &operator[](KeyT &&Key) {
312     return FindAndConstruct(std::move(Key)).second;
313   }
314 
315   /// isPointerIntoBucketsArray - Return true if the specified pointer points
316   /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
317   /// value in the DenseMap).
318   bool isPointerIntoBucketsArray(const void *Ptr) const {
319     return Ptr >= getBuckets() && Ptr < getBucketsEnd();
320   }
321 
322   /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
323   /// array.  In conjunction with the previous method, this can be used to
324   /// determine whether an insertion caused the DenseMap to reallocate.
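  ///
  /// For example (a minimal sketch):
  ///
  ///   llvm::DenseMap<int, int> M;
  ///   const void *Before = M.getPointerIntoBucketsArray();
  ///   M.insert({1, 1});
  ///   bool Reallocated = M.getPointerIntoBucketsArray() != Before;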
325   const void *getPointerIntoBucketsArray() const { return getBuckets(); }
326 
327 protected:
328   DenseMapBase() = default;
329 
330   void destroyAll() {
331     if (getNumBuckets() == 0) // Nothing to do.
332       return;
333 
334     const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
335     for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
336       if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
337           !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
338         P->getSecond().~ValueT();
339       P->getFirst().~KeyT();
340     }
341   }
342 
343   void initEmpty() {
344     setNumEntries(0);
345     setNumTombstones(0);
346 
347     assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
348            "# initial buckets must be a power of two!");
349     const KeyT EmptyKey = getEmptyKey();
350     for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
351       ::new (&B->getFirst()) KeyT(EmptyKey);
352   }
353 
354   /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
356   unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
357     // Ensure that "NumEntries * 4 < NumBuckets * 3"
358     if (NumEntries == 0)
359       return 0;
    // +1 is required because of the strict equality.
    // For example, if NumEntries is 48 we need to return 128:
    // 48 * 4 / 3 + 1 == 65, and NextPowerOf2(65) == 128.
362     return NextPowerOf2(NumEntries * 4 / 3 + 1);
363   }
364 
365   void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
366     initEmpty();
367 
368     // Insert all the old elements.
369     const KeyT EmptyKey = getEmptyKey();
370     const KeyT TombstoneKey = getTombstoneKey();
371     for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
372       if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
373           !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
374         // Insert the key/value into the new table.
375         BucketT *DestBucket;
376         bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
377         (void)FoundVal; // silence warning.
378         assert(!FoundVal && "Key already in new map?");
379         DestBucket->getFirst() = std::move(B->getFirst());
380         ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
381         incrementNumEntries();
382 
383         // Free the value.
384         B->getSecond().~ValueT();
385       }
386       B->getFirst().~KeyT();
387     }
388   }
389 
390   template <typename OtherBaseT>
391   void copyFrom(
392       const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
393     assert(&other != this);
394     assert(getNumBuckets() == other.getNumBuckets());
395 
396     setNumEntries(other.getNumEntries());
397     setNumTombstones(other.getNumTombstones());
398 
399     if (is_trivially_copyable<KeyT>::value &&
400         is_trivially_copyable<ValueT>::value)
401       memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
402              getNumBuckets() * sizeof(BucketT));
403     else
404       for (size_t i = 0; i < getNumBuckets(); ++i) {
405         ::new (&getBuckets()[i].getFirst())
406             KeyT(other.getBuckets()[i].getFirst());
407         if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
408             !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
409           ::new (&getBuckets()[i].getSecond())
410               ValueT(other.getBuckets()[i].getSecond());
411       }
412   }
413 
414   static unsigned getHashValue(const KeyT &Val) {
415     return KeyInfoT::getHashValue(Val);
416   }
417 
418   template<typename LookupKeyT>
419   static unsigned getHashValue(const LookupKeyT &Val) {
420     return KeyInfoT::getHashValue(Val);
421   }
422 
423   static const KeyT getEmptyKey() {
424     static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
425                   "Must pass the derived type to this template!");
426     return KeyInfoT::getEmptyKey();
427   }
428 
429   static const KeyT getTombstoneKey() {
430     return KeyInfoT::getTombstoneKey();
431   }
432 
433 private:
434   iterator makeIterator(BucketT *P, BucketT *E,
435                         DebugEpochBase &Epoch,
436                         bool NoAdvance=false) {
437     if (shouldReverseIterate<KeyT>()) {
438       BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
439       return iterator(B, E, Epoch, NoAdvance);
440     }
441     return iterator(P, E, Epoch, NoAdvance);
442   }
443 
444   const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
445                                    const DebugEpochBase &Epoch,
446                                    const bool NoAdvance=false) const {
447     if (shouldReverseIterate<KeyT>()) {
448       const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
449       return const_iterator(B, E, Epoch, NoAdvance);
450     }
451     return const_iterator(P, E, Epoch, NoAdvance);
452   }
453 
454   unsigned getNumEntries() const {
455     return static_cast<const DerivedT *>(this)->getNumEntries();
456   }
457 
458   void setNumEntries(unsigned Num) {
459     static_cast<DerivedT *>(this)->setNumEntries(Num);
460   }
461 
462   void incrementNumEntries() {
463     setNumEntries(getNumEntries() + 1);
464   }
465 
466   void decrementNumEntries() {
467     setNumEntries(getNumEntries() - 1);
468   }
469 
470   unsigned getNumTombstones() const {
471     return static_cast<const DerivedT *>(this)->getNumTombstones();
472   }
473 
474   void setNumTombstones(unsigned Num) {
475     static_cast<DerivedT *>(this)->setNumTombstones(Num);
476   }
477 
478   void incrementNumTombstones() {
479     setNumTombstones(getNumTombstones() + 1);
480   }
481 
482   void decrementNumTombstones() {
483     setNumTombstones(getNumTombstones() - 1);
484   }
485 
486   const BucketT *getBuckets() const {
487     return static_cast<const DerivedT *>(this)->getBuckets();
488   }
489 
490   BucketT *getBuckets() {
491     return static_cast<DerivedT *>(this)->getBuckets();
492   }
493 
494   unsigned getNumBuckets() const {
495     return static_cast<const DerivedT *>(this)->getNumBuckets();
496   }
497 
498   BucketT *getBucketsEnd() {
499     return getBuckets() + getNumBuckets();
500   }
501 
502   const BucketT *getBucketsEnd() const {
503     return getBuckets() + getNumBuckets();
504   }
505 
506   void grow(unsigned AtLeast) {
507     static_cast<DerivedT *>(this)->grow(AtLeast);
508   }
509 
510   void shrink_and_clear() {
511     static_cast<DerivedT *>(this)->shrink_and_clear();
512   }
513 
514   template <typename KeyArg, typename... ValueArgs>
515   BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
516                             ValueArgs &&... Values) {
517     TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
518 
519     TheBucket->getFirst() = std::forward<KeyArg>(Key);
520     ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
521     return TheBucket;
522   }
523 
524   template <typename LookupKeyT>
525   BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
526                                       ValueT &&Value, LookupKeyT &Lookup) {
527     TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
528 
529     TheBucket->getFirst() = std::move(Key);
530     ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
531     return TheBucket;
532   }
533 
534   template <typename LookupKeyT>
535   BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
536                                 BucketT *TheBucket) {
537     incrementEpoch();
538 
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
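    //
    // For example, with 64 buckets the insertion that would bring the entry
    // count to 48 triggers a grow to 128 buckets, since 48 * 4 == 192 is not
    // less than 64 * 3 == 192; this keeps the load factor strictly below 3/4.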
548     unsigned NewNumEntries = getNumEntries() + 1;
549     unsigned NumBuckets = getNumBuckets();
550     if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
551       this->grow(NumBuckets * 2);
552       LookupBucketFor(Lookup, TheBucket);
553       NumBuckets = getNumBuckets();
554     } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
555                              NumBuckets/8)) {
556       this->grow(NumBuckets);
557       LookupBucketFor(Lookup, TheBucket);
558     }
559     assert(TheBucket);
560 
561     // Only update the state after we've grown our bucket space appropriately
562     // so that when growing buckets we have self-consistent entry count.
563     incrementNumEntries();
564 
565     // If we are writing over a tombstone, remember this.
566     const KeyT EmptyKey = getEmptyKey();
567     if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
568       decrementNumTombstones();
569 
570     return TheBucket;
571   }
572 
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true; otherwise it sets FoundBucket to a bucket with an empty marker or
  /// tombstone and returns false.
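  ///
  /// Probing uses increasing increments, so from an initial index H the
  /// sequence of buckets visited is H, H+1, H+3, H+6, H+10, ... (all taken
  /// modulo the number of buckets).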
577   template<typename LookupKeyT>
578   bool LookupBucketFor(const LookupKeyT &Val,
579                        const BucketT *&FoundBucket) const {
580     const BucketT *BucketsPtr = getBuckets();
581     const unsigned NumBuckets = getNumBuckets();
582 
583     if (NumBuckets == 0) {
584       FoundBucket = nullptr;
585       return false;
586     }
587 
588     // FoundTombstone - Keep track of whether we find a tombstone while probing.
589     const BucketT *FoundTombstone = nullptr;
590     const KeyT EmptyKey = getEmptyKey();
591     const KeyT TombstoneKey = getTombstoneKey();
592     assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
593            !KeyInfoT::isEqual(Val, TombstoneKey) &&
594            "Empty/Tombstone value shouldn't be inserted into map!");
595 
596     unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
597     unsigned ProbeAmt = 1;
598     while (true) {
599       const BucketT *ThisBucket = BucketsPtr + BucketNo;
600       // Found Val's bucket?  If so, return it.
601       if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
602         FoundBucket = ThisBucket;
603         return true;
604       }
605 
      // If we found an empty bucket, the key doesn't exist in the map.  Report
      // the bucket the caller would insert into and return false.
608       if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
609         // If we've already seen a tombstone while probing, fill it in instead
610         // of the empty bucket we eventually probed to.
611         FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
612         return false;
613       }
614 
      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it rather than something that would require more
      // probing.
617       if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
618           !FoundTombstone)
619         FoundTombstone = ThisBucket;  // Remember the first tombstone found.
620 
621       // Otherwise, it's a hash collision or a tombstone, continue quadratic
622       // probing.
623       BucketNo += ProbeAmt++;
624       BucketNo &= (NumBuckets-1);
625     }
626   }
627 
628   template <typename LookupKeyT>
629   bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
630     const BucketT *ConstFoundBucket;
631     bool Result = const_cast<const DenseMapBase *>(this)
632       ->LookupBucketFor(Val, ConstFoundBucket);
633     FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
634     return Result;
635   }
636 
637 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the sizes of the referenced objects
  /// are not included.
642   size_t getMemorySize() const {
643     return getNumBuckets() * sizeof(BucketT);
644   }
645 };
646 
647 /// Equality comparison for DenseMap.
648 ///
649 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
650 /// is also in RHS, and that no additional pairs are in RHS.
651 /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
652 /// complexity is linear, worst case is O(N^2) (if every hash collides).
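///
/// For example (a minimal sketch):
///
///   llvm::DenseMap<int, int> A, B;
///   A.insert({1, 10});
///   B.insert({1, 10});
///   bool Same = (A == B); // true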
653 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
654           typename BucketT>
655 bool operator==(
656     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
657     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
658   if (LHS.size() != RHS.size())
659     return false;
660 
661   for (auto &KV : LHS) {
662     auto I = RHS.find(KV.first);
663     if (I == RHS.end() || I->second != KV.second)
664       return false;
665   }
666 
667   return true;
668 }
669 
670 /// Inequality comparison for DenseMap.
671 ///
672 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
673 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
674           typename BucketT>
675 bool operator!=(
676     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
677     const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
678   return !(LHS == RHS);
679 }
680 
681 template <typename KeyT, typename ValueT,
682           typename KeyInfoT = DenseMapInfo<KeyT>,
683           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
684 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
685                                      KeyT, ValueT, KeyInfoT, BucketT> {
686   friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
687 
688   // Lift some types from the dependent base class into this class for
689   // simplicity of referring to them.
690   using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
691 
692   BucketT *Buckets;
693   unsigned NumEntries;
694   unsigned NumTombstones;
695   unsigned NumBuckets;
696 
697 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
700   explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
701 
702   DenseMap(const DenseMap &other) : BaseT() {
703     init(0);
704     copyFrom(other);
705   }
706 
707   DenseMap(DenseMap &&other) : BaseT() {
708     init(0);
709     swap(other);
710   }
711 
712   template<typename InputIt>
713   DenseMap(const InputIt &I, const InputIt &E) {
714     init(std::distance(I, E));
715     this->insert(I, E);
716   }
717 
718   DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
719     init(Vals.size());
720     this->insert(Vals.begin(), Vals.end());
721   }
722 
723   ~DenseMap() {
724     this->destroyAll();
725     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
726   }
727 
728   void swap(DenseMap& RHS) {
729     this->incrementEpoch();
730     RHS.incrementEpoch();
731     std::swap(Buckets, RHS.Buckets);
732     std::swap(NumEntries, RHS.NumEntries);
733     std::swap(NumTombstones, RHS.NumTombstones);
734     std::swap(NumBuckets, RHS.NumBuckets);
735   }
736 
737   DenseMap& operator=(const DenseMap& other) {
738     if (&other != this)
739       copyFrom(other);
740     return *this;
741   }
742 
743   DenseMap& operator=(DenseMap &&other) {
744     this->destroyAll();
745     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
746     init(0);
747     swap(other);
748     return *this;
749   }
750 
751   void copyFrom(const DenseMap& other) {
752     this->destroyAll();
753     deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
754     if (allocateBuckets(other.NumBuckets)) {
755       this->BaseT::copyFrom(other);
756     } else {
757       NumEntries = 0;
758       NumTombstones = 0;
759     }
760   }
761 
762   void init(unsigned InitNumEntries) {
763     auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
764     if (allocateBuckets(InitBuckets)) {
765       this->BaseT::initEmpty();
766     } else {
767       NumEntries = 0;
768       NumTombstones = 0;
769     }
770   }
771 
772   void grow(unsigned AtLeast) {
773     unsigned OldNumBuckets = NumBuckets;
774     BucketT *OldBuckets = Buckets;
775 
    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
777     assert(Buckets);
778     if (!OldBuckets) {
779       this->BaseT::initEmpty();
780       return;
781     }
782 
783     this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
784 
785     // Free the old table.
786     deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
787                       alignof(BucketT));
788   }
789 
790   void shrink_and_clear() {
791     unsigned OldNumBuckets = NumBuckets;
792     unsigned OldNumEntries = NumEntries;
793     this->destroyAll();
794 
795     // Reduce the number of buckets.
796     unsigned NewNumBuckets = 0;
797     if (OldNumEntries)
798       NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
799     if (NewNumBuckets == NumBuckets) {
800       this->BaseT::initEmpty();
801       return;
802     }
803 
804     deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
805                       alignof(BucketT));
806     init(NewNumBuckets);
807   }
808 
809 private:
810   unsigned getNumEntries() const {
811     return NumEntries;
812   }
813 
814   void setNumEntries(unsigned Num) {
815     NumEntries = Num;
816   }
817 
818   unsigned getNumTombstones() const {
819     return NumTombstones;
820   }
821 
822   void setNumTombstones(unsigned Num) {
823     NumTombstones = Num;
824   }
825 
826   BucketT *getBuckets() const {
827     return Buckets;
828   }
829 
830   unsigned getNumBuckets() const {
831     return NumBuckets;
832   }
833 
834   bool allocateBuckets(unsigned Num) {
835     NumBuckets = Num;
836     if (NumBuckets == 0) {
837       Buckets = nullptr;
838       return false;
839     }
840 
841     Buckets = static_cast<BucketT *>(
842         allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
843     return true;
844   }
845 };
846 
847 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
848           typename KeyInfoT = DenseMapInfo<KeyT>,
849           typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
850 class SmallDenseMap
851     : public DenseMapBase<
852           SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
853           ValueT, KeyInfoT, BucketT> {
854   friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
855 
856   // Lift some types from the dependent base class into this class for
857   // simplicity of referring to them.
858   using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
859 
860   static_assert(isPowerOf2_64(InlineBuckets),
861                 "InlineBuckets must be a power of 2.");
862 
863   unsigned Small : 1;
864   unsigned NumEntries : 31;
865   unsigned NumTombstones;
866 
867   struct LargeRep {
868     BucketT *Buckets;
869     unsigned NumBuckets;
870   };
871 
  /// A "union" of an inline bucket array and the struct representing a
  /// heap-allocated bucket array. This union will be discriminated by the
  /// 'Small' bit.
874   AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
875 
876 public:
877   explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
878     init(NumInitBuckets);
879   }
880 
881   SmallDenseMap(const SmallDenseMap &other) : BaseT() {
882     init(0);
883     copyFrom(other);
884   }
885 
886   SmallDenseMap(SmallDenseMap &&other) : BaseT() {
887     init(0);
888     swap(other);
889   }
890 
891   template<typename InputIt>
892   SmallDenseMap(const InputIt &I, const InputIt &E) {
893     init(NextPowerOf2(std::distance(I, E)));
894     this->insert(I, E);
895   }
896 
897   ~SmallDenseMap() {
898     this->destroyAll();
899     deallocateBuckets();
900   }
901 
902   void swap(SmallDenseMap& RHS) {
903     unsigned TmpNumEntries = RHS.NumEntries;
904     RHS.NumEntries = NumEntries;
905     NumEntries = TmpNumEntries;
906     std::swap(NumTombstones, RHS.NumTombstones);
907 
908     const KeyT EmptyKey = this->getEmptyKey();
909     const KeyT TombstoneKey = this->getTombstoneKey();
910     if (Small && RHS.Small) {
911       // If we're swapping inline bucket arrays, we have to cope with some of
912       // the tricky bits of DenseMap's storage system: the buckets are not
913       // fully initialized. Thus we swap every key, but we may have
914       // a one-directional move of the value.
915       for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
916         BucketT *LHSB = &getInlineBuckets()[i],
917                 *RHSB = &RHS.getInlineBuckets()[i];
918         bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
919                             !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
920         bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
921                             !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
922         if (hasLHSValue && hasRHSValue) {
923           // Swap together if we can...
924           std::swap(*LHSB, *RHSB);
925           continue;
926         }
        // Swap separately and handle any asymmetry.
928         std::swap(LHSB->getFirst(), RHSB->getFirst());
929         if (hasLHSValue) {
930           ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
931           LHSB->getSecond().~ValueT();
932         } else if (hasRHSValue) {
933           ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
934           RHSB->getSecond().~ValueT();
935         }
936       }
937       return;
938     }
939     if (!Small && !RHS.Small) {
940       std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
941       std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
942       return;
943     }
944 
945     SmallDenseMap &SmallSide = Small ? *this : RHS;
946     SmallDenseMap &LargeSide = Small ? RHS : *this;
947 
948     // First stash the large side's rep and move the small side across.
949     LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
950     LargeSide.getLargeRep()->~LargeRep();
951     LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually changed in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
956     for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
957       BucketT *NewB = &LargeSide.getInlineBuckets()[i],
958               *OldB = &SmallSide.getInlineBuckets()[i];
959       ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
960       OldB->getFirst().~KeyT();
961       if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
962           !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
963         ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
964         OldB->getSecond().~ValueT();
965       }
966     }
967 
968     // The hard part of moving the small buckets across is done, just move
969     // the TmpRep into its new home.
970     SmallSide.Small = false;
971     new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
972   }
973 
974   SmallDenseMap& operator=(const SmallDenseMap& other) {
975     if (&other != this)
976       copyFrom(other);
977     return *this;
978   }
979 
980   SmallDenseMap& operator=(SmallDenseMap &&other) {
981     this->destroyAll();
982     deallocateBuckets();
983     init(0);
984     swap(other);
985     return *this;
986   }
987 
988   void copyFrom(const SmallDenseMap& other) {
989     this->destroyAll();
990     deallocateBuckets();
991     Small = true;
992     if (other.getNumBuckets() > InlineBuckets) {
993       Small = false;
994       new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
995     }
996     this->BaseT::copyFrom(other);
997   }
998 
999   void init(unsigned InitBuckets) {
1000     Small = true;
1001     if (InitBuckets > InlineBuckets) {
1002       Small = false;
1003       new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1004     }
1005     this->BaseT::initEmpty();
1006   }
1007 
1008   void grow(unsigned AtLeast) {
1009     if (AtLeast > InlineBuckets)
1010       AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1011 
1012     if (Small) {
1013       // First move the inline buckets into a temporary storage.
1014       AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1015       BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
1016       BucketT *TmpEnd = TmpBegin;
1017 
      // Loop over the buckets, moving non-empty, non-tombstone entries into
      // the temporary storage, advancing TmpEnd as the loop goes.
1020       const KeyT EmptyKey = this->getEmptyKey();
1021       const KeyT TombstoneKey = this->getTombstoneKey();
1022       for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1023         if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1024             !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1025           assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1026                  "Too many inline buckets!");
1027           ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1028           ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1029           ++TmpEnd;
1030           P->getSecond().~ValueT();
1031         }
1032         P->getFirst().~KeyT();
1033       }
1034 
      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. In that case we stay small;
      // otherwise we switch to the large rep here.
1038       if (AtLeast > InlineBuckets) {
1039         Small = false;
1040         new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1041       }
1042       this->moveFromOldBuckets(TmpBegin, TmpEnd);
1043       return;
1044     }
1045 
1046     LargeRep OldRep = std::move(*getLargeRep());
1047     getLargeRep()->~LargeRep();
1048     if (AtLeast <= InlineBuckets) {
1049       Small = true;
1050     } else {
1051       new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1052     }
1053 
1054     this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1055 
1056     // Free the old table.
1057     deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1058                       alignof(BucketT));
1059   }
1060 
1061   void shrink_and_clear() {
1062     unsigned OldSize = this->size();
1063     this->destroyAll();
1064 
1065     // Reduce the number of buckets.
1066     unsigned NewNumBuckets = 0;
1067     if (OldSize) {
1068       NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1069       if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1070         NewNumBuckets = 64;
1071     }
1072     if ((Small && NewNumBuckets <= InlineBuckets) ||
1073         (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1074       this->BaseT::initEmpty();
1075       return;
1076     }
1077 
1078     deallocateBuckets();
1079     init(NewNumBuckets);
1080   }
1081 
1082 private:
1083   unsigned getNumEntries() const {
1084     return NumEntries;
1085   }
1086 
1087   void setNumEntries(unsigned Num) {
1088     // NumEntries is hardcoded to be 31 bits wide.
1089     assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1090     NumEntries = Num;
1091   }
1092 
1093   unsigned getNumTombstones() const {
1094     return NumTombstones;
1095   }
1096 
1097   void setNumTombstones(unsigned Num) {
1098     NumTombstones = Num;
1099   }
1100 
1101   const BucketT *getInlineBuckets() const {
1102     assert(Small);
1103     // Note that this cast does not violate aliasing rules as we assert that
1104     // the memory's dynamic type is the small, inline bucket buffer, and the
1105     // 'storage.buffer' static type is 'char *'.
1106     return reinterpret_cast<const BucketT *>(storage.buffer);
1107   }
1108 
1109   BucketT *getInlineBuckets() {
1110     return const_cast<BucketT *>(
1111       const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1112   }
1113 
1114   const LargeRep *getLargeRep() const {
1115     assert(!Small);
1116     // Note, same rule about aliasing as with getInlineBuckets.
1117     return reinterpret_cast<const LargeRep *>(storage.buffer);
1118   }
1119 
1120   LargeRep *getLargeRep() {
1121     return const_cast<LargeRep *>(
1122       const_cast<const SmallDenseMap *>(this)->getLargeRep());
1123   }
1124 
1125   const BucketT *getBuckets() const {
1126     return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1127   }
1128 
1129   BucketT *getBuckets() {
1130     return const_cast<BucketT *>(
1131       const_cast<const SmallDenseMap *>(this)->getBuckets());
1132   }
1133 
1134   unsigned getNumBuckets() const {
1135     return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1136   }
1137 
1138   void deallocateBuckets() {
1139     if (Small)
1140       return;
1141 
1142     deallocate_buffer(getLargeRep()->Buckets,
1143                       sizeof(BucketT) * getLargeRep()->NumBuckets,
1144                       alignof(BucketT));
1145     getLargeRep()->~LargeRep();
1146   }
1147 
1148   LargeRep allocateBuckets(unsigned Num) {
1149     assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1150     LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1151                         sizeof(BucketT) * Num, alignof(BucketT))),
1152                     Num};
1153     return Rep;
1154   }
1155 };
1156 
1157 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1158           bool IsConst>
1159 class DenseMapIterator : DebugEpochBase::HandleBase {
1160   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1161   friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1162 
1163   using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1164 
1165 public:
1166   using difference_type = ptrdiff_t;
1167   using value_type =
1168       typename std::conditional<IsConst, const Bucket, Bucket>::type;
1169   using pointer = value_type *;
1170   using reference = value_type &;
1171   using iterator_category = std::forward_iterator_tag;
1172 
1173 private:
1174   pointer Ptr = nullptr;
1175   pointer End = nullptr;
1176 
1177 public:
1178   DenseMapIterator() = default;
1179 
1180   DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1181                    bool NoAdvance = false)
1182       : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1183     assert(isHandleInSync() && "invalid construction!");
1184 
1185     if (NoAdvance) return;
1186     if (shouldReverseIterate<KeyT>()) {
1187       RetreatPastEmptyBuckets();
1188       return;
1189     }
1190     AdvancePastEmptyBuckets();
1191   }
1192 
  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // when the source iterator is already const so it doesn't end up as a
  // user-defined copy constructor.
1196   template <bool IsConstSrc,
1197             typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
1198   DenseMapIterator(
1199       const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1200       : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1201 
1202   reference operator*() const {
1203     assert(isHandleInSync() && "invalid iterator access!");
1204     if (shouldReverseIterate<KeyT>())
1205       return Ptr[-1];
1206     return *Ptr;
1207   }
1208   pointer operator->() const {
1209     assert(isHandleInSync() && "invalid iterator access!");
1210     if (shouldReverseIterate<KeyT>())
1211       return &(Ptr[-1]);
1212     return Ptr;
1213   }
1214 
1215   bool operator==(const ConstIterator &RHS) const {
1216     assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1217     assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1218     assert(getEpochAddress() == RHS.getEpochAddress() &&
1219            "comparing incomparable iterators!");
1220     return Ptr == RHS.Ptr;
1221   }
1222   bool operator!=(const ConstIterator &RHS) const {
1223     assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1224     assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1225     assert(getEpochAddress() == RHS.getEpochAddress() &&
1226            "comparing incomparable iterators!");
1227     return Ptr != RHS.Ptr;
1228   }
1229 
1230   inline DenseMapIterator& operator++() {  // Preincrement
1231     assert(isHandleInSync() && "invalid iterator access!");
1232     if (shouldReverseIterate<KeyT>()) {
1233       --Ptr;
1234       RetreatPastEmptyBuckets();
1235       return *this;
1236     }
1237     ++Ptr;
1238     AdvancePastEmptyBuckets();
1239     return *this;
1240   }
1241   DenseMapIterator operator++(int) {  // Postincrement
1242     assert(isHandleInSync() && "invalid iterator access!");
1243     DenseMapIterator tmp = *this; ++*this; return tmp;
1244   }
1245 
1246 private:
1247   void AdvancePastEmptyBuckets() {
1248     assert(Ptr <= End);
1249     const KeyT Empty = KeyInfoT::getEmptyKey();
1250     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1251 
1252     while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1253                           KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1254       ++Ptr;
1255   }
1256 
1257   void RetreatPastEmptyBuckets() {
1258     assert(Ptr >= End);
1259     const KeyT Empty = KeyInfoT::getEmptyKey();
1260     const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1261 
1262     while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1263                           KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1264       --Ptr;
1265   }
1266 };
1267 
1268 template <typename KeyT, typename ValueT, typename KeyInfoT>
1269 inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1270   return X.getMemorySize();
1271 }
1272 
1273 } // end namespace llvm
1274 
1275 #endif // LLVM_ADT_DENSEMAP_H
1276