/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_OrderedHashTable_h
#define ds_OrderedHashTable_h

/*
 * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
 * They are like js::HashMap and js::HashSet except that:
 *
 *   - Iterating over an Ordered hash table visits the entries in the order in
 *     which they were inserted. This means that unlike a HashMap, the behavior
 *     of an OrderedHashMap is deterministic (as long as the HashPolicy methods
 *     are effect-free and consistent); the hashing is a pure performance
 *     optimization.
 *
 *   - Range objects over Ordered tables remain valid even when entries are
 *     added or removed or the table is resized. (However in the case of
 *     removing entries, note the warning on class Range below.)
 *
 *   - The API is a little different, so it's not a drop-in replacement.
 *     In particular, the hash policy is a little different.
 *     Also, the Ordered templates lack the Ptr and AddPtr types.
 *
 * Hash policies
 *
 * See the comment about "Hash policy" in HashTable.h for general features that
 * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
 * differ in that the hash() method takes an extra argument:
 *     static js::HashNumber hash(Lookup, const HashCodeScrambler&);
 * They must additionally provide a distinguished "empty" key value and the
 * following static member functions:
 *     bool isEmpty(const Key&);
 *     void makeEmpty(Key*);
 */
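/*
 * For illustration only (this policy is not defined in this file, and the
 * sentinel choice is an assumption that must be safe for the caller's data),
 * a policy for uint32_t keys that reserves UINT32_MAX as the "empty" value
 * might look roughly like this:
 *
 *     struct Uint32OrderedHashPolicy {
 *         using Lookup = uint32_t;
 *         static js::HashNumber hash(const Lookup& l,
 *                                     const mozilla::HashCodeScrambler& hcs) {
 *             return hcs.scramble(l);
 *         }
 *         static bool match(uint32_t key, const Lookup& l) { return key == l; }
 *         static bool isEmpty(const uint32_t& key) { return key == UINT32_MAX; }
 *         static void makeEmpty(uint32_t* key) { *key = UINT32_MAX; }
 *     };
 */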

#include "mozilla/HashFunctions.h"
#include "mozilla/Move.h"

using mozilla::Forward;
using mozilla::Move;

namespace js {

namespace detail {

/*
 * detail::OrderedHashTable is the underlying data structure used to implement both
 * OrderedHashMap and OrderedHashSet. Programs should use one of those two
 * templates rather than OrderedHashTable.
 */
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable
{
  public:
    typedef typename Ops::KeyType Key;
    typedef typename Ops::Lookup Lookup;

    struct Data
    {
        T element;
        Data* chain;

        Data(const T& e, Data* c) : element(e), chain(c) {}
        Data(T&& e, Data* c) : element(Move(e)), chain(c) {}
    };

    class Range;
    friend class Range;

  private:
    Data** hashTable;           // hash table (has hashBuckets() elements)
    Data* data;                 // data vector, an array of Data objects
                                // data[0:dataLength] are constructed
    uint32_t dataLength;        // number of constructed elements in data
    uint32_t dataCapacity;      // size of data, in elements
    uint32_t liveCount;         // dataLength less empty (removed) entries
    uint32_t hashShift;         // multiplicative hash shift
    Range* ranges;              // list of all live Ranges on this table
    AllocPolicy alloc;
    mozilla::HashCodeScrambler hcs;  // don't reveal pointer hash codes
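    // Illustrative example of the invariants above (a sketch, not code in this
    // file): after put(a), put(b), remove(a), put(c), the data vector holds
    // [<empty>, b, c], so dataLength == 3 and liveCount == 2. Each hashTable
    // bucket heads a singly linked chain, threaded through Data::chain, of the
    // constructed entries that hashed into it (emptied entries stay on their
    // chain until the next rehash).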

  public:
    OrderedHashTable(AllocPolicy& ap, mozilla::HashCodeScrambler hcs)
        : hashTable(nullptr), data(nullptr), dataLength(0), ranges(nullptr), alloc(ap), hcs(hcs) {}

    MOZ_MUST_USE bool init() {
        MOZ_ASSERT(!hashTable, "init must be called at most once");

        uint32_t buckets = initialBuckets();
        Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
        if (!tableAlloc)
            return false;
        for (uint32_t i = 0; i < buckets; i++)
            tableAlloc[i] = nullptr;

        uint32_t capacity = uint32_t(buckets * fillFactor());
        Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
        if (!dataAlloc) {
            alloc.free_(tableAlloc);
            return false;
        }

        // clear() requires that members are assigned only after all allocation
        // has succeeded, and that this->ranges is left untouched.
        hashTable = tableAlloc;
        data = dataAlloc;
        dataLength = 0;
        dataCapacity = capacity;
        liveCount = 0;
        hashShift = HashNumberSizeBits - initialBucketsLog2();
        MOZ_ASSERT(hashBuckets() == buckets);
        return true;
    }

    ~OrderedHashTable() {
        for (Range* r = ranges; r; ) {
            Range* next = r->next;
            r->onTableDestroyed();
            r = next;
        }
        alloc.free_(hashTable);
        freeData(data, dataLength);
    }

    /* Return the number of elements in the table. */
    uint32_t count() const { return liveCount; }

    /* True if any element matches l. */
    bool has(const Lookup& l) const {
        return lookup(l) != nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    T* get(const Lookup& l) {
        Data* e = lookup(l, prepareHash(l));
        return e ? &e->element : nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    const T* get(const Lookup& l) const {
        return const_cast<OrderedHashTable*>(this)->get(l);
    }

    /*
     * If the table already contains an entry that matches |element|,
     * replace that entry with |element|. Otherwise add a new entry.
     *
     * On success, return true, whether there was already a matching element or
     * not. On allocation failure, return false. If this returns false, it
     * means the element was not added to the table.
     */
    template <typename ElementInput>
    MOZ_MUST_USE bool put(ElementInput&& element) {
        HashNumber h = prepareHash(Ops::getKey(element));
        if (Data* e = lookup(Ops::getKey(element), h)) {
            e->element = Forward<ElementInput>(element);
            return true;
        }

        if (dataLength == dataCapacity) {
            // If more than 1/4 of the data vector is empty (deleted) entries,
            // simply rehash in place to free up some space. Otherwise, grow
            // the table.
            uint32_t newHashShift = liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
            if (!rehash(newHashShift))
                return false;
        }

        h >>= hashShift;
        liveCount++;
        Data* e = &data[dataLength++];
        new (e) Data(Forward<ElementInput>(element), hashTable[h]);
        hashTable[h] = e;
        return true;
    }

    /*
     * If the table contains an element matching l, remove it and set *foundp
     * to true. Otherwise set *foundp to false.
     *
     * Return true on success, false if we tried to shrink the table and hit an
     * allocation failure. Even if this returns false, *foundp is set correctly
     * and the matching element was removed. Shrinking is an optimization and
     * it's OK for it to fail.
     */
    bool remove(const Lookup& l, bool* foundp) {
        // Note: This could be optimized so that removing the last entry,
        // data[dataLength - 1], decrements dataLength. LIFO use cases would
        // benefit.

        // If a matching entry exists, empty it.
        Data* e = lookup(l, prepareHash(l));
        if (e == nullptr) {
            *foundp = false;
            return true;
        }

        *foundp = true;
        liveCount--;
        Ops::makeEmpty(&e->element);

        // Update active Ranges.
        uint32_t pos = e - data;
        for (Range* r = ranges; r; r = r->next)
            r->onRemove(pos);

        // If many entries have been removed, try to shrink the table.
        if (hashBuckets() > initialBuckets() && liveCount < dataLength * minDataFill()) {
            if (!rehash(hashShift + 1))
                return false;
        }
        return true;
    }

    /*
     * Remove all entries.
     *
     * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
     * in the old state.
     *
     * The effect on live Ranges is the same as removing all entries; in
     * particular, those Ranges are still live and will see any entries added
     * after a successful clear().
     */
    MOZ_MUST_USE bool clear() {
        if (dataLength != 0) {
            Data** oldHashTable = hashTable;
            Data* oldData = data;
            uint32_t oldDataLength = dataLength;

            hashTable = nullptr;
            if (!init()) {
                // init() only mutates members on success; see comment above.
                hashTable = oldHashTable;
                return false;
            }

            alloc.free_(oldHashTable);
            freeData(oldData, oldDataLength);
            for (Range* r = ranges; r; r = r->next)
                r->onClear();
        }

        MOZ_ASSERT(hashTable);
        MOZ_ASSERT(data);
        MOZ_ASSERT(dataLength == 0);
        MOZ_ASSERT(liveCount == 0);
        return true;
    }

    /*
     * Ranges are used to iterate over OrderedHashTables.
     *
     * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
     * Then you can walk all the key-value pairs like this:
     *
     *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
     *         Map::Entry& pair = r.front();
     *         ... do something with pair ...
     *     }
     *
     * Ranges remain valid for the lifetime of the OrderedHashTable, even if
     * entries are added or removed or the table is resized. Don't do anything
     * to a Range, except destroy it, after the OrderedHashTable has been
     * destroyed. (We support destroying the two objects in either order to
     * humor the GC, bless its nondeterministic heart.)
     *
     * Warning: The behavior when the current front() entry is removed from the
     * table is subtly different from js::HashTable<>::Enum::removeFront()!
     * HashTable::Enum doesn't skip any entries when you removeFront() and then
     * popFront(). OrderedHashTable::Range does! (This is useful for using a
     * Range to implement JS Map.prototype.iterator.)
     *
     * The workaround is to call popFront() as soon as possible,
     * before there's any possibility of modifying the table:
     *
     *     for (Map::Range r = map.all(); !r.empty(); ) {
     *         Key key = r.front().key;         // this won't modify map
     *         Value val = r.front().value;     // this won't modify map
     *         r.popFront();
     *         // ...do things that might modify map...
     *     }
     */
    class Range
    {
        friend class OrderedHashTable;

        // Cannot be a reference since we need to be able to do
        // |offsetof(Range, ht)|.
        OrderedHashTable* ht;

        /* The index of front() within ht->data. */
        uint32_t i;

        /*
         * The number of nonempty entries in ht->data to the left of front().
         * This is used when the table is resized or compacted.
         */
        uint32_t count;

        /*
         * Links in the doubly-linked list of active Ranges on ht.
         *
         * prevp points to the previous Range's .next field;
         *   or to ht->ranges if this is the first Range in the list.
         * next points to the next Range;
         *   or nullptr if this is the last Range in the list.
         *
         * Invariant: *prevp == this.
         */
        Range** prevp;
        Range* next;

        /*
         * Create a Range over all the entries in ht.
         * (This is private on purpose. End users must use ht->all().)
         */
        explicit Range(OrderedHashTable* ht)
            : ht(ht), i(0), count(0), prevp(&ht->ranges), next(ht->ranges)
        {
            *prevp = this;
            if (next)
                next->prevp = &next;
            seek();
        }

      public:
        Range(const Range& other)
            : ht(other.ht), i(other.i), count(other.count), prevp(&ht->ranges), next(ht->ranges)
        {
            *prevp = this;
            if (next)
                next->prevp = &next;
        }

        ~Range() {
            *prevp = next;
            if (next)
                next->prevp = prevp;
        }

      private:
        // Prohibit copy assignment.
        Range& operator=(const Range& other) = delete;

        void seek() {
            while (i < ht->dataLength && Ops::isEmpty(Ops::getKey(ht->data[i].element)))
                i++;
        }

        /*
         * The hash table calls this when an entry is removed.
         * j is the index of the removed entry.
         */
        void onRemove(uint32_t j) {
            MOZ_ASSERT(valid());
            if (j < i)
                count--;
            if (j == i)
                seek();
        }

        /*
         * The hash table calls this when the table is resized or compacted.
         * Since |count| is the number of nonempty entries to the left of
         * front(), discarding the empty entries will not affect count, and it
         * will make i and count equal.
         */
        void onCompact() {
            MOZ_ASSERT(valid());
            i = count;
        }

        /* The hash table calls this when cleared. */
        void onClear() {
            MOZ_ASSERT(valid());
            i = count = 0;
        }

        bool valid() const {
            return next != this;
        }

        void onTableDestroyed() {
            MOZ_ASSERT(valid());
            prevp = &next;
            next = this;
        }

      public:
        bool empty() const {
            MOZ_ASSERT(valid());
            return i >= ht->dataLength;
        }

        /*
         * Return the first element in the range. This must not be called if
         * this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * front() invalid. If in doubt, check empty() before calling front().
         */
        T& front() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            return ht->data[i].element;
        }

        /*
         * Remove the first element from this range.
         * This must not be called if this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * popFront() invalid. If in doubt, check empty() before calling
         * popFront().
         */
        void popFront() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
            count++;
            i++;
            seek();
        }

        /*
         * Change the key of the front entry.
         *
         * This calls Ops::hash on both the current key and the new key.
         * Ops::hash on the current key must return the same hash code as
         * when the entry was added to the table.
         */
        void rekeyFront(const Key& k) {
            MOZ_ASSERT(valid());
            Data& entry = ht->data[i];
            HashNumber oldHash = ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
            HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
            Ops::setKey(entry.element, k);
            if (newHash != oldHash) {
                // Remove this entry from its old hash chain. (If this crashes
                // reading nullptr, it would mean we did not find this entry on
                // the hash chain where we expected it. That probably means the
                // key's hash code changed since it was inserted, breaking the
                // hash code invariant.)
                Data** ep = &ht->hashTable[oldHash];
                while (*ep != &entry)
                    ep = &(*ep)->chain;
                *ep = entry.chain;

                // Add it to the new hash chain. We could just insert it at the
                // beginning of the chain. Instead, we do a bit of work to
                // preserve the invariant that hash chains always go in reverse
                // insertion order (descending memory order). No code currently
                // depends on this invariant, so it's fine to kill it if
                // needed.
                ep = &ht->hashTable[newHash];
                while (*ep && *ep > &entry)
                    ep = &(*ep)->chain;
                entry.chain = *ep;
                *ep = &entry;
            }
        }

        static size_t offsetOfHashTable() {
            return offsetof(Range, ht);
        }
        static size_t offsetOfI() {
            return offsetof(Range, i);
        }
        static size_t offsetOfCount() {
            return offsetof(Range, count);
        }
        static size_t offsetOfPrevP() {
            return offsetof(Range, prevp);
        }
        static size_t offsetOfNext() {
            return offsetof(Range, next);
        }
    };

    Range all() { return Range(this); }

    /*
     * Change the key of the entry, if any, that matches |current| to |newKey|,
     * replacing its element with |element| (which must already have |newKey|
     * as its key).
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
        if (current == newKey)
            return;

        Data* entry = lookup(current, prepareHash(current));
        if (!entry)
            return;

        HashNumber oldHash = prepareHash(current) >> hashShift;
        HashNumber newHash = prepareHash(newKey) >> hashShift;

        entry->element = element;

        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data** ep = &hashTable[oldHash];
        while (*ep != entry)
            ep = &(*ep)->chain;
        *ep = entry->chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &hashTable[newHash];
        while (*ep && *ep > entry)
            ep = &(*ep)->chain;
        entry->chain = *ep;
        *ep = entry;
    }

    static size_t offsetOfDataLength() {
        return offsetof(OrderedHashTable, dataLength);
    }
    static size_t offsetOfData() {
        return offsetof(OrderedHashTable, data);
    }
    static constexpr size_t offsetOfDataElement() {
        return offsetof(Data, element);
    }
    static constexpr size_t sizeofData() {
        return sizeof(Data);
    }

  private:
    /* Logarithm base 2 of the number of buckets in the hash table initially. */
    static uint32_t initialBucketsLog2() { return 1; }
    static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

    /*
     * The maximum load factor (mean number of entries per bucket).
     * It is an invariant that
     *     dataCapacity == floor(hashBuckets() * fillFactor()).
     *
     * The fill factor should be between 2 and 4, and it should be chosen so that
     * the fill factor times sizeof(Data) is close to but <= a power of 2.
     * This fixed fill factor was chosen to make the size of the data
     * array, in bytes, close to a power of two when sizeof(T) is 16.
     */
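    // Worked example (an illustrative check, assuming 64-bit pointers): if
    // sizeof(T) == 16, then sizeof(Data) == 24, and 8/3 * 24 == 64, so the
    // data array occupies roughly hashBuckets() * 64 bytes. With the initial
    // 2 buckets, dataCapacity starts at uint32_t(2 * 8.0 / 3.0) == 5 entries.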
    static double fillFactor() { return 8.0 / 3.0; }

    /*
     * The minimum permitted value of (liveCount / dataLength).
     * If that ratio drops below this value, we shrink the table.
     */
    static double minDataFill() { return 0.25; }

  public:
    HashNumber prepareHash(const Lookup& l) const {
        return ScrambleHashCode(Ops::hash(l, hcs));
    }

  private:
    /* The size of hashTable, in elements. Always a power of two. */
    uint32_t hashBuckets() const {
        return 1 << (HashNumberSizeBits - hashShift);
    }

    static void destroyData(Data* data, uint32_t length) {
        for (Data* p = data + length; p != data; )
            (--p)->~Data();
    }

    void freeData(Data* data, uint32_t length) {
        destroyData(data, length);
        alloc.free_(data);
    }

    Data* lookup(const Lookup& l, HashNumber h) {
        for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
            if (Ops::match(Ops::getKey(e->element), l))
                return e;
        }
        return nullptr;
    }

    const Data* lookup(const Lookup& l) const {
        return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
    }

    /* This is called after rehashing the table. */
    void compacted() {
        // If we had any empty entries, compacting may have moved live entries
        // to the left within |data|. Notify all live Ranges of the change.
        for (Range* r = ranges; r; r = r->next)
            r->onCompact();
    }

    /* Compact the entries in |data| and rehash them. */
    void rehashInPlace() {
        for (uint32_t i = 0, N = hashBuckets(); i < N; i++)
            hashTable[i] = nullptr;
        Data* wp = data;
        Data* end = data + dataLength;
        for (Data* rp = data; rp != end; rp++) {
            if (!Ops::isEmpty(Ops::getKey(rp->element))) {
                HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
                if (rp != wp)
                    wp->element = Move(rp->element);
                wp->chain = hashTable[h];
                hashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == data + liveCount);

        while (wp != end)
            (--end)->~Data();
        dataLength = liveCount;
        compacted();
    }

    /*
     * Grow, shrink, or compact both |hashTable| and |data|.
     *
     * On success, this returns true, dataLength == liveCount, and there are no
     * empty elements in data[0:dataLength]. On allocation failure, this
     * leaves everything as it was and returns false.
     */
    MOZ_MUST_USE bool rehash(uint32_t newHashShift) {
        // If the size of the table is not changing, rehash in place to avoid
        // allocating memory.
        if (newHashShift == hashShift) {
            rehashInPlace();
            return true;
        }

        size_t newHashBuckets =
            size_t(1) << (HashNumberSizeBits - newHashShift);
        Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
        if (!newHashTable)
            return false;
        for (uint32_t i = 0; i < newHashBuckets; i++)
            newHashTable[i] = nullptr;

        uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
        Data* newData = alloc.template pod_malloc<Data>(newCapacity);
        if (!newData) {
            alloc.free_(newHashTable);
            return false;
        }

        Data* wp = newData;
        Data* end = data + dataLength;
        for (Data* p = data; p != end; p++) {
            if (!Ops::isEmpty(Ops::getKey(p->element))) {
                HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
                new (wp) Data(Move(p->element), newHashTable[h]);
                newHashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == newData + liveCount);

        alloc.free_(hashTable);
        freeData(data, dataLength);

        hashTable = newHashTable;
        data = newData;
        dataLength = liveCount;
        dataCapacity = newCapacity;
        hashShift = newHashShift;
        MOZ_ASSERT(hashBuckets() == newHashBuckets);

        compacted();
        return true;
    }

    // Not copyable.
    OrderedHashTable& operator=(const OrderedHashTable&) = delete;
    OrderedHashTable(const OrderedHashTable&) = delete;
};

}  // namespace detail

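/*
 * A rough usage sketch for OrderedHashMap (illustrative only; the policy,
 * allocator, and scrambler names below are assumptions, not definitions from
 * this file):
 *
 *     using Map = js::OrderedHashMap<uint32_t, int, Uint32OrderedHashPolicy,
 *                                    js::TempAllocPolicy>;
 *     Map map(alloc, scrambler);
 *     if (!map.init() || !map.put(7, 100))
 *         return false;                          // OOM
 *     if (Map::Entry* e = map.get(7))
 *         e->value++;                            // values are mutable in place
 *     for (Map::Range r = map.all(); !r.empty(); r.popFront())
 *         use(r.front().key, r.front().value);   // insertion order
 */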
template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap
{
  public:
    class Entry
    {
        template <class, class, class> friend class detail::OrderedHashTable;
        void operator=(const Entry& rhs) {
            const_cast<Key&>(key) = rhs.key;
            value = rhs.value;
        }

        void operator=(Entry&& rhs) {
            MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
            const_cast<Key&>(key) = Move(rhs.key);
            value = Move(rhs.value);
        }

      public:
        Entry() : key(), value() {}
        template <typename V>
        Entry(const Key& k, V&& v) : key(k), value(Forward<V>(v)) {}
        Entry(Entry&& rhs) : key(Move(rhs.key)), value(Move(rhs.value)) {}

        const Key key;
        Value value;

        static size_t offsetOfKey() {
            return offsetof(Entry, key);
        }
        static size_t offsetOfValue() {
            return offsetof(Entry, value);
        }
    };

  private:
    struct MapOps : OrderedHashPolicy
    {
        typedef Key KeyType;
        static void makeEmpty(Entry* e) {
            OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));

            // Clear the value. Destroying it is another possibility, but that
            // would complicate class Entry considerably.
            e->value = Value();
        }
        static const Key& getKey(const Entry& e) { return e.key; }
        static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
    };

    typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename Impl::Range Range;

    OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
    MOZ_MUST_USE bool init()                        { return impl.init(); }
    uint32_t count() const                          { return impl.count(); }
    bool has(const Key& key) const                  { return impl.has(key); }
    Range all()                                     { return impl.all(); }
    const Entry* get(const Key& key) const          { return impl.get(key); }
    Entry* get(const Key& key)                      { return impl.get(key); }
    bool remove(const Key& key, bool* foundp)       { return impl.remove(key, foundp); }
    MOZ_MUST_USE bool clear()                       { return impl.clear(); }

    template <typename V>
    MOZ_MUST_USE bool put(const Key& key, V&& value) {
        return impl.put(Entry(key, Forward<V>(value)));
    }

    HashNumber hash(const Key& key) const { return impl.prepareHash(key); }

    void rekeyOneEntry(const Key& current, const Key& newKey) {
        const Entry* e = get(current);
        if (!e)
            return;
        return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
    }

    static size_t offsetOfEntryKey() {
        return Entry::offsetOfKey();
    }
    static size_t offsetOfImplDataLength() {
        return Impl::offsetOfDataLength();
    }
    static size_t offsetOfImplData() {
        return Impl::offsetOfData();
    }
    static constexpr size_t offsetOfImplDataElement() {
        return Impl::offsetOfDataElement();
    }
    static constexpr size_t sizeofImplData() {
        return Impl::sizeofData();
    }
};

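/*
 * OrderedHashSet is used the same way, except that elements are their own
 * keys. A minimal sketch (the policy and allocator names are assumptions, as
 * in the OrderedHashMap example above):
 *
 *     using Set = js::OrderedHashSet<uint32_t, Uint32OrderedHashPolicy,
 *                                    js::TempAllocPolicy>;
 *     Set set(alloc, scrambler);
 *     if (!set.init() || !set.put(7))
 *         return false;                          // OOM
 *     MOZ_ASSERT(set.has(7));
 *     for (Set::Range r = set.all(); !r.empty(); r.popFront())
 *         use(r.front());                        // insertion order
 */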
template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet
{
  private:
    struct SetOps : OrderedHashPolicy
    {
        typedef const T KeyType;
        static const T& getKey(const T& v) { return v; }
        static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
    };

    typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename Impl::Range Range;

    explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
    MOZ_MUST_USE bool init()                        { return impl.init(); }
    uint32_t count() const                          { return impl.count(); }
    bool has(const T& value) const                  { return impl.has(value); }
    Range all()                                     { return impl.all(); }
    MOZ_MUST_USE bool put(const T& value)           { return impl.put(value); }
    bool remove(const T& value, bool* foundp)       { return impl.remove(value, foundp); }
    MOZ_MUST_USE bool clear()                       { return impl.clear(); }

    HashNumber hash(const T& value) const { return impl.prepareHash(value); }

    void rekeyOneEntry(const T& current, const T& newKey) {
        return impl.rekeyOneEntry(current, newKey, newKey);
    }

    static size_t offsetOfEntryKey() {
        return 0;
    }
    static size_t offsetOfImplDataLength() {
        return Impl::offsetOfDataLength();
    }
    static size_t offsetOfImplData() {
        return Impl::offsetOfData();
    }
    static constexpr size_t offsetOfImplDataElement() {
        return Impl::offsetOfDataElement();
    }
    static constexpr size_t sizeofImplData() {
        return Impl::sizeofData();
    }
};

}  // namespace js

#endif /* ds_OrderedHashTable_h */