/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_OrderedHashTable_h
#define ds_OrderedHashTable_h

/*
 * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
 * They are like js::HashMap and js::HashSet except that:
 *
 *   - Iterating over an Ordered hash table visits the entries in the order in
 *     which they were inserted. This means that unlike a HashMap, the behavior
 *     of an OrderedHashMap is deterministic (as long as the HashPolicy methods
 *     are effect-free and consistent); the hashing is a pure performance
 *     optimization.
 *
 *   - Range objects over Ordered tables remain valid even when entries are
 *     added or removed or the table is resized. (However, in the case of
 *     removing entries, note the warning on class Range below.)
 *
 *   - The API is a little different, so it's not a drop-in replacement.
 *     In particular, the hash policy is a little different.
 *     Also, the Ordered templates lack the Ptr and AddPtr types.
 *
 * Hash policies
 *
 * See the comment about "Hash policy" in HashTable.h for general features that
 * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
 * differ in that the hash() method takes an extra argument:
 *     static js::HashNumber hash(Lookup, const HashCodeScrambler&);
 * They must additionally provide a distinguished "empty" key value and the
 * following static member functions:
 *     bool isEmpty(const Key&);
 *     void makeEmpty(Key*);
 */
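
/*
 * Example hash policy (an illustrative sketch only; nothing in this file
 * depends on it): a policy for uint32_t keys that reserves UINT32_MAX as the
 * distinguished "empty" value. The match() requirement comes from the general
 * hash policy contract in HashTable.h.
 *
 *     struct Uint32OrderedPolicy {
 *         using Lookup = uint32_t;
 *         static js::HashNumber hash(Lookup l,
 *                                    const mozilla::HashCodeScrambler& hcs) {
 *             return hcs.scramble(l);
 *         }
 *         static bool match(uint32_t k, Lookup l) { return k == l; }
 *         static bool isEmpty(const uint32_t& k) { return k == UINT32_MAX; }
 *         static void makeEmpty(uint32_t* k) { *k = UINT32_MAX; }
 *     };
 */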

#include "mozilla/HashFunctions.h"
#include "mozilla/MemoryReporting.h"

#include <utility>

#include "js/HashTable.h"

namespace js {

namespace detail {

/*
 * detail::OrderedHashTable is the underlying data structure used to implement
 * both OrderedHashMap and OrderedHashSet. Programs should use one of those two
 * templates rather than OrderedHashTable.
 */
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable {
 public:
  using Key = typename Ops::KeyType;
  using Lookup = typename Ops::Lookup;

  struct Data {
    T element;
    Data* chain;

    Data(const T& e, Data* c) : element(e), chain(c) {}
    Data(T&& e, Data* c) : element(std::move(e)), chain(c) {}
  };

  class Range;
  friend class Range;

 private:
  Data** hashTable;       // hash table (has hashBuckets() elements)
  Data* data;             // data vector, an array of Data objects
                          // data[0:dataLength] are constructed
  uint32_t dataLength;    // number of constructed elements in data
  uint32_t dataCapacity;  // size of data, in elements
  uint32_t liveCount;     // dataLength less empty (removed) entries
  uint32_t hashShift;     // multiplicative hash shift
  Range* ranges;  // list of all live Ranges on this table in malloc memory
  Range*
      nurseryRanges;  // list of all live Ranges on this table in the GC nursery
  AllocPolicy alloc;
  mozilla::HashCodeScrambler hcs;  // don't reveal pointer hash codes

  // TODO: This should be templated on a functor type and receive lambda
  // arguments but this causes problems for the hazard analysis builds. See
  // bug 1398213.
  template <void (*f)(Range* range, uint32_t arg)>
  void forEachRange(uint32_t arg = 0) {
    Range* next;
    for (Range* r = ranges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
    for (Range* r = nurseryRanges; r; r = next) {
      next = r->next;
      f(r, arg);
    }
  }

 public:
  OrderedHashTable(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : hashTable(nullptr),
        data(nullptr),
        dataLength(0),
        dataCapacity(0),
        liveCount(0),
        hashShift(0),
        ranges(nullptr),
        nurseryRanges(nullptr),
        alloc(std::move(ap)),
        hcs(hcs) {}

  [[nodiscard]] bool init() {
    MOZ_ASSERT(!hashTable, "init must be called at most once");

    uint32_t buckets = initialBuckets();
    Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
    if (!tableAlloc) {
      return false;
    }
    for (uint32_t i = 0; i < buckets; i++) {
      tableAlloc[i] = nullptr;
    }

    uint32_t capacity = uint32_t(buckets * fillFactor());
    Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
    if (!dataAlloc) {
      alloc.free_(tableAlloc, buckets);
      return false;
    }

    // clear() requires that members are assigned only after all allocation
    // has succeeded, and that this->ranges is left untouched.
    hashTable = tableAlloc;
    data = dataAlloc;
    dataLength = 0;
    dataCapacity = capacity;
    liveCount = 0;
    hashShift = js::kHashNumberBits - initialBucketsLog2();
    MOZ_ASSERT(hashBuckets() == buckets);
    return true;
  }

  ~OrderedHashTable() {
    forEachRange<Range::onTableDestroyed>();
    if (hashTable) {
      // |hashBuckets()| isn't valid when |hashTable| hasn't been created.
      alloc.free_(hashTable, hashBuckets());
    }
    freeData(data, dataLength, dataCapacity);
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t size = 0;
    if (hashTable) {
      size += mallocSizeOf(hashTable);
    }
    if (data) {
      size += mallocSizeOf(data);
    }
    return size;
  }

  /* Return the number of elements in the table. */
  uint32_t count() const { return liveCount; }

  /* True if any element matches l. */
  bool has(const Lookup& l) const { return lookup(l) != nullptr; }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  T* get(const Lookup& l) {
    Data* e = lookup(l, prepareHash(l));
    return e ? &e->element : nullptr;
  }

  /* Return a pointer to the element, if any, that matches l, or nullptr. */
  const T* get(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->get(l);
  }

  /*
   * If the table already contains an entry that matches |element|,
   * replace that entry with |element|. Otherwise add a new entry.
   *
   * On success, return true, whether there was already a matching element or
   * not. On allocation failure, return false. If this returns false, it
   * means the element was not added to the table.
   */
  template <typename ElementInput>
  [[nodiscard]] bool put(ElementInput&& element) {
    HashNumber h = prepareHash(Ops::getKey(element));
    if (Data* e = lookup(Ops::getKey(element), h)) {
      e->element = std::forward<ElementInput>(element);
      return true;
    }

    if (dataLength == dataCapacity) {
      // If more than 1/4 of the entries in |data| are empty (deleted), simply
      // rehash in place to free up some space. Otherwise, grow the table.
      uint32_t newHashShift =
          liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
      if (!rehash(newHashShift)) {
        return false;
      }
    }

    h >>= hashShift;
    liveCount++;
    Data* e = &data[dataLength++];
    new (e) Data(std::forward<ElementInput>(element), hashTable[h]);
    hashTable[h] = e;
    return true;
  }

  /*
   * If the table contains an element matching l, remove it and set *foundp
   * to true. Otherwise set *foundp to false.
   *
   * Return true on success, false if we tried to shrink the table and hit an
   * allocation failure. Even if this returns false, *foundp is set correctly
   * and the matching element was removed. Shrinking is an optimization and
   * it's OK for it to fail.
   */
  bool remove(const Lookup& l, bool* foundp) {
    // Note: This could be optimized so that removing the last entry,
    // data[dataLength - 1], decrements dataLength. LIFO use cases would
    // benefit.

    // If a matching entry exists, empty it.
    Data* e = lookup(l, prepareHash(l));
    if (e == nullptr) {
      *foundp = false;
      return true;
    }

    *foundp = true;
    liveCount--;
    Ops::makeEmpty(&e->element);

    // Update active Ranges.
    uint32_t pos = e - data;
    forEachRange<&Range::onRemove>(pos);

    // If many entries have been removed, try to shrink the table.
    if (hashBuckets() > initialBuckets() &&
        liveCount < dataLength * minDataFill()) {
      if (!rehash(hashShift + 1)) {
        return false;
      }
    }
    return true;
  }

  /*
   * Remove all entries.
   *
   * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
   * in the old state.
   *
   * The effect on live Ranges is the same as removing all entries; in
   * particular, those Ranges are still live and will see any entries added
   * after a successful clear().
   */
  [[nodiscard]] bool clear() {
    if (dataLength != 0) {
      Data** oldHashTable = hashTable;
      Data* oldData = data;
      uint32_t oldHashBuckets = hashBuckets();
      uint32_t oldDataLength = dataLength;
      uint32_t oldDataCapacity = dataCapacity;

      hashTable = nullptr;
      if (!init()) {
        // init() only mutates members on success; see comment above.
        hashTable = oldHashTable;
        return false;
      }

      alloc.free_(oldHashTable, oldHashBuckets);
      freeData(oldData, oldDataLength, oldDataCapacity);
      forEachRange<&Range::onClear>();
    }

    MOZ_ASSERT(hashTable);
    MOZ_ASSERT(data);
    MOZ_ASSERT(dataLength == 0);
    MOZ_ASSERT(liveCount == 0);
    return true;
  }

  /*
   * Ranges are used to iterate over OrderedHashTables.
   *
   * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
   * Then you can walk all the key-value pairs like this:
   *
   *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
   *         Map::Entry& pair = r.front();
   *         ... do something with pair ...
   *     }
   *
   * Ranges remain valid for the lifetime of the OrderedHashTable, even if
   * entries are added or removed or the table is resized. Don't do anything
   * to a Range, except destroy it, after the OrderedHashTable has been
   * destroyed. (We support destroying the two objects in either order to
   * humor the GC, bless its nondeterministic heart.)
   *
   * Warning: The behavior when the current front() entry is removed from the
   * table is subtly different from js::HashTable<>::Enum::removeFront()!
   * HashTable::Enum doesn't skip any entries when you removeFront() and then
   * popFront(). OrderedHashTable::Range does! (This is useful for using a
   * Range to implement JS Map.prototype.iterator.)
   *
   * The workaround is to call popFront() as soon as possible,
   * before there's any possibility of modifying the table:
   *
   *     for (Map::Range r = map.all(); !r.empty(); ) {
   *         Key key = r.front().key;         // this won't modify map
   *         Value val = r.front().value;     // this won't modify map
   *         r.popFront();
   *         // ...do things that might modify map...
   *     }
   */
  class Range {
    friend class OrderedHashTable;

    // Cannot be a reference since we need to be able to do
    // |offsetof(Range, ht)|.
    OrderedHashTable* ht;

    /* The index of front() within ht->data. */
    uint32_t i;

    /*
     * The number of nonempty entries in ht->data to the left of front().
     * This is used when the table is resized or compacted.
     */
    uint32_t count;

    /*
     * Links in the doubly-linked list of active Ranges on ht.
     *
     * prevp points to the previous Range's .next field;
     *   or to ht->ranges if this is the first Range in the list.
     * next points to the next Range;
     *   or nullptr if this is the last Range in the list.
     *
     * Invariant: *prevp == this.
     */
    Range** prevp;
    Range* next;

    /*
     * Create a Range over all the entries in ht.
     * (This is private on purpose. End users must use ht->all().)
     */
    Range(OrderedHashTable* ht, Range** listp)
        : ht(ht), i(0), count(0), prevp(listp), next(*listp) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
      seek();
    }

   public:
    Range(const Range& other)
        : ht(other.ht),
          i(other.i),
          count(other.count),
          prevp(&ht->ranges),
          next(ht->ranges) {
      *prevp = this;
      if (next) {
        next->prevp = &next;
      }
    }

    ~Range() {
      *prevp = next;
      if (next) {
        next->prevp = prevp;
      }
    }

   private:
    // Prohibit copy assignment.
    Range& operator=(const Range& other) = delete;

    void seek() {
      while (i < ht->dataLength &&
             Ops::isEmpty(Ops::getKey(ht->data[i].element))) {
        i++;
      }
    }

    /*
     * The hash table calls this when an entry is removed.
     * j is the index of the removed entry.
     */
    void onRemove(uint32_t j) {
      MOZ_ASSERT(valid());
      if (j < i) {
        count--;
      }
      if (j == i) {
        seek();
      }
    }

    /*
     * The hash table calls this when the table is resized or compacted.
     * Since |count| is the number of nonempty entries to the left of
     * front(), discarding the empty entries will not affect count, and it
     * will make i and count equal.
     */
    void onCompact() {
      MOZ_ASSERT(valid());
      i = count;
    }

    /* The hash table calls this when cleared. */
    void onClear() {
      MOZ_ASSERT(valid());
      i = count = 0;
    }

    bool valid() const { return next != this; }

    void onTableDestroyed() {
      MOZ_ASSERT(valid());
      prevp = &next;
      next = this;
    }

   public:
    bool empty() const {
      MOZ_ASSERT(valid());
      return i >= ht->dataLength;
    }

    /*
     * Return the first element in the range. This must not be called if
     * this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * front() invalid. If in doubt, check empty() before calling front().
     */
    T& front() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      return ht->data[i].element;
    }

    /*
     * Remove the first element from this range.
     * This must not be called if this->empty().
     *
     * Warning: Removing an entry from the table also removes it from any
     * live Ranges, and a Range can become empty that way, rendering
     * popFront() invalid. If in doubt, check empty() before calling
     * popFront().
     */
    void popFront() {
      MOZ_ASSERT(valid());
      MOZ_ASSERT(!empty());
      MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
      count++;
      i++;
      seek();
    }

    /*
     * Change the key of the front entry.
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyFront(const Key& k) {
      MOZ_ASSERT(valid());
      Data& entry = ht->data[i];
      HashNumber oldHash =
          ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
      HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
      Ops::setKey(entry.element, k);
      if (newHash != oldHash) {
        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data** ep = &ht->hashTable[oldHash];
        while (*ep != &entry) {
          ep = &(*ep)->chain;
        }
        *ep = entry.chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &ht->hashTable[newHash];
        while (*ep && *ep > &entry) {
          ep = &(*ep)->chain;
        }
        entry.chain = *ep;
        *ep = &entry;
      }
    }

    static size_t offsetOfHashTable() { return offsetof(Range, ht); }
    static size_t offsetOfI() { return offsetof(Range, i); }
    static size_t offsetOfCount() { return offsetof(Range, count); }
    static size_t offsetOfPrevP() { return offsetof(Range, prevp); }
    static size_t offsetOfNext() { return offsetof(Range, next); }

    static void onTableDestroyed(Range* range, uint32_t arg) {
      range->onTableDestroyed();
    }
    static void onRemove(Range* range, uint32_t arg) { range->onRemove(arg); }
    static void onClear(Range* range, uint32_t arg) { range->onClear(); }
    static void onCompact(Range* range, uint32_t arg) { range->onCompact(); }
  };

  Range all() { return Range(this, &ranges); }

  /*
   * Allocate a new Range, possibly in nursery memory. The buffer must be
   * large enough to hold a Range object.
   *
   * All nursery-allocated ranges can be freed in one go by calling
   * destroyNurseryRanges().
   */
  Range* createRange(void* buffer, bool inNursery) {
    auto range = static_cast<Range*>(buffer);
    new (range) Range(this, inNursery ? &nurseryRanges : &ranges);
    return range;
  }
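
  // A minimal usage sketch for createRange(), under the assumption that the
  // caller provides suitably sized and aligned storage (the GC-facing
  // callers manage this buffer themselves):
  //
  //     alignas(Range) unsigned char buffer[sizeof(Range)];
  //     Range* r = table.createRange(buffer, /* inNursery = */ false);
  //     // ... iterate with r->empty(), r->front(), r->popFront() ...
  //     r->~Range();  // non-nursery Ranges must be destroyed explicitly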

  void destroyNurseryRanges() { nurseryRanges = nullptr; }

  /*
   * Change the key of the entry, if any, that matches |current| to |newKey|,
   * replacing the entry's element with |element|.
   *
   * This calls Ops::hash on both the current key and the new key.
   * Ops::hash on the current key must return the same hash code as
   * when the entry was added to the table.
   */
  void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
    if (current == newKey) {
      return;
    }

    Data* entry = lookup(current, prepareHash(current));
    if (!entry) {
      return;
    }

    HashNumber oldHash = prepareHash(current) >> hashShift;
    HashNumber newHash = prepareHash(newKey) >> hashShift;

    entry->element = element;

    // Remove this entry from its old hash chain. (If this crashes
    // reading nullptr, it would mean we did not find this entry on
    // the hash chain where we expected it. That probably means the
    // key's hash code changed since it was inserted, breaking the
    // hash code invariant.)
    Data** ep = &hashTable[oldHash];
    while (*ep != entry) {
      ep = &(*ep)->chain;
    }
    *ep = entry->chain;

    // Add it to the new hash chain. We could just insert it at the
    // beginning of the chain. Instead, we do a bit of work to
    // preserve the invariant that hash chains always go in reverse
    // insertion order (descending memory order). No code currently
    // depends on this invariant, so it's fine to kill it if
    // needed.
    ep = &hashTable[newHash];
    while (*ep && *ep > entry) {
      ep = &(*ep)->chain;
    }
    entry->chain = *ep;
    *ep = entry;
  }

  static size_t offsetOfDataLength() {
    return offsetof(OrderedHashTable, dataLength);
  }
  static size_t offsetOfData() { return offsetof(OrderedHashTable, data); }
  static constexpr size_t offsetOfDataElement() {
    static_assert(offsetof(Data, element) == 0,
                  "RangeFront and RangePopFront depend on offsetof(Data, "
                  "element) being 0");
    return offsetof(Data, element);
  }
  static constexpr size_t sizeofData() { return sizeof(Data); }

 private:
  /* Logarithm base 2 of the number of buckets in the hash table initially. */
  static uint32_t initialBucketsLog2() { return 1; }
  static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

  /*
   * The maximum load factor (mean number of entries per bucket).
   * It is an invariant that
   *     dataCapacity == floor(hashBuckets() * fillFactor()).
   *
   * The fill factor should be between 2 and 4, and it should be chosen so that
   * the fill factor times sizeof(Data) is close to but <= a power of 2.
   * This fixed fill factor was chosen to make the size of the data
   * array, in bytes, close to a power of two when sizeof(T) is 16.
   */
  static double fillFactor() { return 8.0 / 3.0; }
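
  // Worked example of the invariant above: with the initial 2 buckets,
  // dataCapacity is floor(2 * 8/3) = 5. And on a 64-bit build with
  // sizeof(T) == 16, sizeof(Data) == 24 (a 16-byte element plus an 8-byte
  // chain pointer), so fillFactor() * sizeof(Data) == 64, a power of two.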

  /*
   * The minimum permitted value of (liveCount / dataLength).
   * If that ratio drops below this value, we shrink the table.
   */
  static double minDataFill() { return 0.25; }

 public:
  HashNumber prepareHash(const Lookup& l) const {
    return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
  }

 private:
  /* The size of hashTable, in elements. Always a power of two. */
  uint32_t hashBuckets() const {
    return 1 << (js::kHashNumberBits - hashShift);
  }
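
  // Note: the bucket for a prepared hash |h| is |h >> hashShift|, i.e. the
  // top (kHashNumberBits - hashShift) bits of |h|; see lookup() and put().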

  static void destroyData(Data* data, uint32_t length) {
    for (Data* p = data + length; p != data;) {
      (--p)->~Data();
    }
  }

  void freeData(Data* data, uint32_t length, uint32_t capacity) {
    destroyData(data, length);
    alloc.free_(data, capacity);
  }

  Data* lookup(const Lookup& l, HashNumber h) {
    for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
      if (Ops::match(Ops::getKey(e->element), l)) {
        return e;
      }
    }
    return nullptr;
  }

  const Data* lookup(const Lookup& l) const {
    return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
  }

  /* This is called after rehashing the table. */
  void compacted() {
    // If we had any empty entries, compacting may have moved live entries
    // to the left within |data|. Notify all live Ranges of the change.
    forEachRange<&Range::onCompact>();
  }

  /* Compact the entries in |data| and rehash them. */
  void rehashInPlace() {
    for (uint32_t i = 0, N = hashBuckets(); i < N; i++) {
      hashTable[i] = nullptr;
    }
    Data* wp = data;
    Data* end = data + dataLength;
    for (Data* rp = data; rp != end; rp++) {
      if (!Ops::isEmpty(Ops::getKey(rp->element))) {
        HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
        if (rp != wp) {
          wp->element = std::move(rp->element);
        }
        wp->chain = hashTable[h];
        hashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == data + liveCount);

    while (wp != end) {
      (--end)->~Data();
    }
    dataLength = liveCount;
    compacted();
  }

  /*
   * Grow, shrink, or compact both |hashTable| and |data|.
   *
   * On success, this returns true, dataLength == liveCount, and there are no
   * empty elements in data[0:dataLength]. On allocation failure, this
   * leaves everything as it was and returns false.
   */
  [[nodiscard]] bool rehash(uint32_t newHashShift) {
    // If the size of the table is not changing, rehash in place to avoid
    // allocating memory.
    if (newHashShift == hashShift) {
      rehashInPlace();
      return true;
    }

    size_t newHashBuckets = size_t(1) << (js::kHashNumberBits - newHashShift);
    Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
    if (!newHashTable) {
      return false;
    }
    for (uint32_t i = 0; i < newHashBuckets; i++) {
      newHashTable[i] = nullptr;
    }

    uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
    Data* newData = alloc.template pod_malloc<Data>(newCapacity);
    if (!newData) {
      alloc.free_(newHashTable, newHashBuckets);
      return false;
    }

    Data* wp = newData;
    Data* end = data + dataLength;
    for (Data* p = data; p != end; p++) {
      if (!Ops::isEmpty(Ops::getKey(p->element))) {
        HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
        new (wp) Data(std::move(p->element), newHashTable[h]);
        newHashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == newData + liveCount);

    alloc.free_(hashTable, hashBuckets());
    freeData(data, dataLength, dataCapacity);

    hashTable = newHashTable;
    data = newData;
    dataLength = liveCount;
    dataCapacity = newCapacity;
    hashShift = newHashShift;
    MOZ_ASSERT(hashBuckets() == newHashBuckets);

    compacted();
    return true;
  }

  // Not copyable.
  OrderedHashTable& operator=(const OrderedHashTable&) = delete;
  OrderedHashTable(const OrderedHashTable&) = delete;
};

}  // namespace detail

template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap {
 public:
  class Entry {
    template <class, class, class>
    friend class detail::OrderedHashTable;
    void operator=(const Entry& rhs) {
      const_cast<Key&>(key) = rhs.key;
      value = rhs.value;
    }

    void operator=(Entry&& rhs) {
      MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
      const_cast<Key&>(key) = std::move(rhs.key);
      value = std::move(rhs.value);
    }

   public:
    Entry() : key(), value() {}
    template <typename V>
    Entry(const Key& k, V&& v) : key(k), value(std::forward<V>(v)) {}
    Entry(Entry&& rhs) : key(std::move(rhs.key)), value(std::move(rhs.value)) {}

    const Key key;
    Value value;

    static size_t offsetOfKey() { return offsetof(Entry, key); }
    static size_t offsetOfValue() { return offsetof(Entry, value); }
  };

 private:
  struct MapOps : OrderedHashPolicy {
    using KeyType = Key;
    static void makeEmpty(Entry* e) {
      OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));

      // Clear the value. Destroying it is another possibility, but that
      // would complicate class Entry considerably.
      e->value = Value();
    }
    static const Key& getKey(const Entry& e) { return e.key; }
    static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
  };

  typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
  Impl impl;

 public:
  using Range = typename Impl::Range;

  OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : impl(std::move(ap), hcs) {}
  [[nodiscard]] bool init() { return impl.init(); }
  uint32_t count() const { return impl.count(); }
  bool has(const Key& key) const { return impl.has(key); }
  Range all() { return impl.all(); }
  const Entry* get(const Key& key) const { return impl.get(key); }
  Entry* get(const Key& key) { return impl.get(key); }
  bool remove(const Key& key, bool* foundp) { return impl.remove(key, foundp); }
  [[nodiscard]] bool clear() { return impl.clear(); }

  template <typename V>
  [[nodiscard]] bool put(const Key& key, V&& value) {
    return impl.put(Entry(key, std::forward<V>(value)));
  }

  HashNumber hash(const Key& key) const { return impl.prepareHash(key); }

  void rekeyOneEntry(const Key& current, const Key& newKey) {
    const Entry* e = get(current);
    if (!e) {
      return;
    }
    return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
  }

  Range* createRange(void* buffer, bool inNursery) {
    return impl.createRange(buffer, inNursery);
  }

  void destroyNurseryRanges() { impl.destroyNurseryRanges(); }

  static size_t offsetOfEntryKey() { return Entry::offsetOfKey(); }
  static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
  static size_t offsetOfImplData() { return Impl::offsetOfData(); }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return impl.sizeOfExcludingThis(mallocSizeOf);
  }
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }
};

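/*
 * Example usage (a sketch, not part of this header's API): an OrderedHashMap
 * keyed on uint32_t, using the illustrative Uint32OrderedPolicy from the top
 * of this file. The AllocPolicy and scrambler construction are assumptions
 * and vary by embedder; see js/AllocPolicy.h and mozilla/HashFunctions.h.
 *
 *     using Map = js::OrderedHashMap<uint32_t, bool, Uint32OrderedPolicy,
 *                                    js::TempAllocPolicy>;
 *     Map map(js::TempAllocPolicy(cx), scrambler);
 *     if (!map.init() || !map.put(42, true)) {
 *         return false;  // OOM
 *     }
 *     if (Map::Entry* e = map.get(42)) {
 *         e->value = false;  // values are mutable in place
 *     }
 *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
 *         // ...visits entries in insertion order...
 *     }
 */
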
template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet {
 private:
  struct SetOps : OrderedHashPolicy {
    using KeyType = const T;
    static const T& getKey(const T& v) { return v; }
    static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
  };

  typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
  Impl impl;

 public:
  using Range = typename Impl::Range;

  explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
      : impl(std::move(ap), hcs) {}
  [[nodiscard]] bool init() { return impl.init(); }
  uint32_t count() const { return impl.count(); }
  bool has(const T& value) const { return impl.has(value); }
  Range all() { return impl.all(); }
  [[nodiscard]] bool put(const T& value) { return impl.put(value); }
  bool remove(const T& value, bool* foundp) {
    return impl.remove(value, foundp);
  }
  [[nodiscard]] bool clear() { return impl.clear(); }

  HashNumber hash(const T& value) const { return impl.prepareHash(value); }

  void rekeyOneEntry(const T& current, const T& newKey) {
    return impl.rekeyOneEntry(current, newKey, newKey);
  }

  Range* createRange(void* buffer, bool inNursery) {
    return impl.createRange(buffer, inNursery);
  }

  void destroyNurseryRanges() { impl.destroyNurseryRanges(); }

  static size_t offsetOfEntryKey() { return 0; }
  static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
  static size_t offsetOfImplData() { return Impl::offsetOfData(); }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return impl.sizeOfExcludingThis(mallocSizeOf);
  }
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }
};

}  // namespace js

#endif /* ds_OrderedHashTable_h */