1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include <new>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include "PLDHashTable.h"
12 #include "mozilla/HashFunctions.h"
13 #include "mozilla/MathAlgorithms.h"
14 #include "mozilla/OperatorNewExtensions.h"
15 #include "nsAlgorithm.h"
16 #include "mozilla/Likely.h"
17 #include "mozilla/MemoryReporting.h"
18 #include "mozilla/ChaosMode.h"
19
20 using namespace mozilla;
21
22 #ifdef DEBUG
23
24 class AutoReadOp
25 {
26 Checker& mChk;
27 public:
AutoReadOp(Checker & aChk)28 explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
~AutoReadOp()29 ~AutoReadOp() { mChk.EndReadOp(); }
30 };
31
32 class AutoWriteOp
33 {
34 Checker& mChk;
35 public:
AutoWriteOp(Checker & aChk)36 explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); }
~AutoWriteOp()37 ~AutoWriteOp() { mChk.EndWriteOp(); }
38 };
39
40 class AutoIteratorRemovalOp
41 {
42 Checker& mChk;
43 public:
AutoIteratorRemovalOp(Checker & aChk)44 explicit AutoIteratorRemovalOp(Checker& aChk)
45 : mChk(aChk)
46 {
47 mChk.StartIteratorRemovalOp();
48 }
~AutoIteratorRemovalOp()49 ~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); }
50 };
51
52 class AutoDestructorOp
53 {
54 Checker& mChk;
55 public:
AutoDestructorOp(Checker & aChk)56 explicit AutoDestructorOp(Checker& aChk)
57 : mChk(aChk)
58 {
59 mChk.StartDestructorOp();
60 }
~AutoDestructorOp()61 ~AutoDestructorOp() { mChk.EndDestructorOp(); }
62 };
63
64 #endif
65
66 /* static */ PLDHashNumber
HashStringKey(const void * aKey)67 PLDHashTable::HashStringKey(const void* aKey)
68 {
69 return HashString(static_cast<const char*>(aKey));
70 }
71
72 /* static */ PLDHashNumber
HashVoidPtrKeyStub(const void * aKey)73 PLDHashTable::HashVoidPtrKeyStub(const void* aKey)
74 {
75 return (PLDHashNumber)(ptrdiff_t)aKey >> 2;
76 }
77
78 /* static */ bool
MatchEntryStub(const PLDHashEntryHdr * aEntry,const void * aKey)79 PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey)
80 {
81 const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
82
83 return stub->key == aKey;
84 }
85
86 /* static */ bool
MatchStringKey(const PLDHashEntryHdr * aEntry,const void * aKey)87 PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey)
88 {
89 const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
90
91 // XXX tolerate null keys on account of sloppy Mozilla callers.
92 return stub->key == aKey ||
93 (stub->key && aKey &&
94 strcmp((const char*)stub->key, (const char*)aKey) == 0);
95 }
96
97 /* static */ void
MoveEntryStub(PLDHashTable * aTable,const PLDHashEntryHdr * aFrom,PLDHashEntryHdr * aTo)98 PLDHashTable::MoveEntryStub(PLDHashTable* aTable,
99 const PLDHashEntryHdr* aFrom,
100 PLDHashEntryHdr* aTo)
101 {
102 memcpy(aTo, aFrom, aTable->mEntrySize);
103 }
104
105 /* static */ void
ClearEntryStub(PLDHashTable * aTable,PLDHashEntryHdr * aEntry)106 PLDHashTable::ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
107 {
108 memset(aEntry, 0, aTable->mEntrySize);
109 }
110
111 static const PLDHashTableOps gStubOps = {
112 PLDHashTable::HashVoidPtrKeyStub,
113 PLDHashTable::MatchEntryStub,
114 PLDHashTable::MoveEntryStub,
115 PLDHashTable::ClearEntryStub,
116 nullptr
117 };
118
119 /* static */ const PLDHashTableOps*
StubOps()120 PLDHashTable::StubOps()
121 {
122 return &gStubOps;
123 }
124
// Compute aCapacity * aEntrySize, reporting whether the product fits in
// 32 bits. *aNbytes always receives the (possibly truncated) 32-bit product;
// the return value is false exactly when truncation occurred.
static bool
SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes)
{
  const uint64_t wide = uint64_t(aCapacity) * uint64_t(aEntrySize);
  const uint32_t narrow = aCapacity * aEntrySize;
  *aNbytes = narrow;
  return uint64_t(narrow) == wide;
}
132
// Compute max and min load numbers (entry counts). We have a secondary max
// that allows us to overload a table reasonably if it cannot be grown further
// (i.e. if ChangeTable() fails). The table slows down drastically if the
// secondary max is too close to 1, but 0.96875 gives only a slight slowdown
// while allowing 1.3x more elements.
static inline uint32_t
MaxLoad(uint32_t aCapacity)
{
  // 75% of capacity, computed without floating point.
  return aCapacity - (aCapacity >> 2);
}
static inline uint32_t
MaxLoadOnGrowthFailure(uint32_t aCapacity)
{
  // 96.875% of capacity: the overload ceiling used when growth fails.
  return aCapacity - (aCapacity >> 5);
}
static inline uint32_t
MinLoad(uint32_t aCapacity)
{
  // 25% of capacity: below this the table is a shrink candidate.
  return aCapacity >> 2;
}
153
154 // Compute the minimum capacity (and the Log2 of that capacity) for a table
155 // containing |aLength| elements while respecting the following contraints:
156 // - table must be at most 75% full;
157 // - capacity must be a power of two;
158 // - capacity cannot be too small.
159 static inline void
BestCapacity(uint32_t aLength,uint32_t * aCapacityOut,uint32_t * aLog2CapacityOut)160 BestCapacity(uint32_t aLength, uint32_t* aCapacityOut,
161 uint32_t* aLog2CapacityOut)
162 {
163 // Compute the smallest capacity allowing |aLength| elements to be inserted
164 // without rehashing.
165 uint32_t capacity = (aLength * 4 + (3 - 1)) / 3; // == ceil(aLength * 4 / 3)
166 if (capacity < PLDHashTable::kMinCapacity) {
167 capacity = PLDHashTable::kMinCapacity;
168 }
169
170 // Round up capacity to next power-of-two.
171 uint32_t log2 = CeilingLog2(capacity);
172 capacity = 1u << log2;
173 MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity);
174
175 *aCapacityOut = capacity;
176 *aLog2CapacityOut = log2;
177 }
178
// Compute the hash-shift value for a table sized to hold |aLength| entries
// of |aEntrySize| bytes. Deliberately crashes (rather than returning an
// error) when the requested configuration is unrepresentable, since this
// runs during construction where there is no way to report failure.
/* static */ MOZ_ALWAYS_INLINE uint32_t
PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength)
{
  if (aLength > kMaxInitialLength) {
    MOZ_CRASH("Initial length is too large");
  }

  uint32_t capacity, log2;
  BestCapacity(aLength, &capacity, &log2);

  // The entry store for this capacity must fit in a uint32_t byte count.
  uint32_t nbytes;
  if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
    MOZ_CRASH("Initial entry store size is too large");
  }

  // Compute the hashShift value: the number of hash bits not needed to
  // index |capacity| slots.
  return kHashBits - log2;
}
197
// Construct an empty table. No entry storage is allocated here; allocation
// is deferred until the first Add(). HashShift() crashes on unrepresentable
// aEntrySize/aLength combinations, so a constructed table is always usable.
PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
                           uint32_t aLength)
  : mOps(aOps)
  , mHashShift(HashShift(aEntrySize, aLength))
  , mEntrySize(aEntrySize)
  , mEntryCount(0)
  , mRemovedCount(0)
  , mEntryStore()
#ifdef DEBUG
  , mChecker()
#endif
{
}
211
// Move-assignment: destroy |this| in place, reconstruct it empty, then steal
// |aOther|'s storage and bookkeeping. |aOther| is left with a null entry
// store so its destruction is a no-op.
PLDHashTable&
PLDHashTable::operator=(PLDHashTable&& aOther)
{
  if (this == &aOther) {
    return *this;
  }

  // |mOps| and |mEntrySize| are required to stay the same, they're
  // conceptually part of the type -- indeed, if PLDHashTable was a templated
  // type like nsTHashtable, they *would* be part of the type -- so it only
  // makes sense to assign in cases where they match.
  MOZ_RELEASE_ASSERT(mOps == aOther.mOps);
  MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize);

  // Reconstruct |this|.
  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(aOther.mOps, aOther.mEntrySize, 0);

  // Move non-const pieces over.
  mHashShift = Move(aOther.mHashShift);
  mEntryCount = Move(aOther.mEntryCount);
  mRemovedCount = Move(aOther.mRemovedCount);
  mEntryStore = Move(aOther.mEntryStore);
#ifdef DEBUG
  mChecker = Move(aOther.mChecker);
#endif

  // Clear up |aOther| so its destruction will be a no-op.
  {
#ifdef DEBUG
    // Bracket the mutation of |aOther| like the destructor it replaces.
    AutoDestructorOp op(mChecker);
#endif
    aOther.mEntryStore.Set(nullptr);
  }

  return *this;
}
249
250 PLDHashNumber
Hash1(PLDHashNumber aHash0)251 PLDHashTable::Hash1(PLDHashNumber aHash0)
252 {
253 return aHash0 >> mHashShift;
254 }
255
256 // Double hashing needs the second hash code to be relatively prime to table
257 // size, so we simply make hash2 odd.
258 void
Hash2(PLDHashNumber aHash,uint32_t & aHash2Out,uint32_t & aSizeMaskOut)259 PLDHashTable::Hash2(PLDHashNumber aHash,
260 uint32_t& aHash2Out, uint32_t& aSizeMaskOut)
261 {
262 uint32_t sizeLog2 = kHashBits - mHashShift;
263 aHash2Out = ((aHash << sizeLog2) >> mHashShift) | 1;
264 aSizeMaskOut = (PLDHashNumber(1) << sizeLog2) - 1;
265 }
266
267 // Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note
268 // that a removed-entry sentinel need be stored only if the removed entry had
269 // a colliding entry added after it. Therefore we can use 1 as the collision
270 // flag in addition to the removed-entry sentinel value. Multiplicative hash
271 // uses the high order bits of mKeyHash, so this least-significant reservation
272 // should not hurt the hash function's effectiveness much.
273
274 // Match an entry's mKeyHash against an unstored one computed from a key.
275 /* static */ bool
MatchEntryKeyhash(PLDHashEntryHdr * aEntry,PLDHashNumber aKeyHash)276 PLDHashTable::MatchEntryKeyhash(PLDHashEntryHdr* aEntry, PLDHashNumber aKeyHash)
277 {
278 return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash;
279 }
280
281 // Compute the address of the indexed entry in table.
282 PLDHashEntryHdr*
AddressEntry(uint32_t aIndex)283 PLDHashTable::AddressEntry(uint32_t aIndex)
284 {
285 return reinterpret_cast<PLDHashEntryHdr*>(
286 mEntryStore.Get() + aIndex * mEntrySize);
287 }
288
~PLDHashTable()289 PLDHashTable::~PLDHashTable()
290 {
291 #ifdef DEBUG
292 AutoDestructorOp op(mChecker);
293 #endif
294
295 if (!mEntryStore.Get()) {
296 return;
297 }
298
299 // Clear any remaining live entries.
300 char* entryAddr = mEntryStore.Get();
301 char* entryLimit = entryAddr + Capacity() * mEntrySize;
302 while (entryAddr < entryLimit) {
303 PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
304 if (EntryIsLive(entry)) {
305 mOps->clearEntry(this, entry);
306 }
307 entryAddr += mEntrySize;
308 }
309
310 // Entry storage is freed last, by ~EntryStore().
311 }
312
313 void
ClearAndPrepareForLength(uint32_t aLength)314 PLDHashTable::ClearAndPrepareForLength(uint32_t aLength)
315 {
316 // Get these values before the destructor clobbers them.
317 const PLDHashTableOps* ops = mOps;
318 uint32_t entrySize = mEntrySize;
319
320 this->~PLDHashTable();
321 new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength);
322 }
323
324 void
Clear()325 PLDHashTable::Clear()
326 {
327 ClearAndPrepareForLength(kDefaultInitialLength);
328 }
329
// If |Reason| is |ForAdd|, the return value is always non-null and it may be
// a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
// value is null on a miss, and will never be a previously-removed entry on a
// hit. This distinction is a bit grotty but this function is hot enough that
// these differences are worthwhile.
template <PLDHashTable::SearchReason Reason>
PLDHashEntryHdr* NS_FASTCALL
PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash)
{
  MOZ_ASSERT(mEntryStore.Get());
  // Callers must strip the collision flag (see ComputeKeyHash()).
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return (Reason == ForAdd) ? entry : nullptr;
  }

  // Hit: return entry.
  PLDHashMatchEntry matchEntry = mOps->matchEntry;
  if (MatchEntryKeyhash(entry, aKeyHash) &&
      matchEntry(entry, aKey)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  // Save the first removed entry pointer so Add() can recycle it. (Only used
  // if Reason==ForAdd.)
  PLDHashEntryHdr* firstRemoved = nullptr;

  for (;;) {
    if (Reason == ForAdd) {
      if (MOZ_UNLIKELY(EntryIsRemoved(entry))) {
        // Remember the first recyclable slot on the probe chain.
        if (!firstRemoved) {
          firstRemoved = entry;
        }
      } else {
        // Flag the entry as lying on a collision chain, so a later removal
        // knows it must leave a sentinel behind.
        entry->mKeyHash |= kCollisionFlag;
      }
    }

    // Step to the next slot on this key's probe sequence.
    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      // Prefer recycling a removed slot over consuming a fresh one.
      return (Reason == ForAdd) ? (firstRemoved ? firstRemoved : entry)
                                : nullptr;
    }

    if (MatchEntryKeyhash(entry, aKeyHash) &&
        matchEntry(entry, aKey)) {
      return entry;
    }
  }

  // NOTREACHED
  return nullptr;
}
397
// This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
// 1. assume |Reason| is |ForAdd|,
// 2. assume that |aKey| will never match an existing entry, and
// 3. assume that no entries have been removed from the current table
//    structure.
// Avoiding the need for |aKey| means we can avoid needing a way to map entries
// to keys, which means callers can use complex key types more easily.
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
{
  MOZ_ASSERT(mEntryStore.Get());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  for (;;) {
    // Assumption 3: the freshly-allocated store contains no removed entries.
    NS_ASSERTION(!EntryIsRemoved(entry),
                 "!EntryIsRemoved(entry)");
    // Occupied slot on this probe chain: flag it so future removals leave a
    // sentinel, then keep probing.
    entry->mKeyHash |= kCollisionFlag;

    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      return entry;
    }
  }

  // NOTREACHED
}
442
// Resize the entry store by a factor of 2^aDeltaLog2 (grow for positive
// values, compress in place for zero, shrink for negative), rehashing every
// live entry into the new store and dropping removed-entry sentinels.
// Returns false -- leaving the table completely untouched -- if the new
// capacity would exceed limits or the allocation fails.
bool
PLDHashTable::ChangeTable(int32_t aDeltaLog2)
{
  MOZ_ASSERT(mEntryStore.Get());

  // Look, but don't touch, until we succeed in getting new entry store.
  int32_t oldLog2 = kHashBits - mHashShift;
  int32_t newLog2 = oldLog2 + aDeltaLog2;
  uint32_t newCapacity = 1u << newLog2;
  if (newCapacity > kMaxCapacity) {
    return false;
  }

  uint32_t nbytes;
  if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
    return false; // overflowed
  }

  char* newEntryStore = (char*)malloc(nbytes);
  if (!newEntryStore) {
    return false;
  }

  // We can't fail from here on, so update table parameters.
  mHashShift = kHashBits - newLog2;
  mRemovedCount = 0;

  // Assign the new entry store to table. Zero-filling marks every slot free.
  memset(newEntryStore, 0, nbytes);
  char* oldEntryStore;
  char* oldEntryAddr;
  oldEntryAddr = oldEntryStore = mEntryStore.Get();
  mEntryStore.Set(newEntryStore);
  PLDHashMoveEntry moveEntry = mOps->moveEntry;

  // Copy only live entries, leaving removed ones behind.
  uint32_t oldCapacity = 1u << oldLog2;
  for (uint32_t i = 0; i < oldCapacity; ++i) {
    PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
    if (EntryIsLive(oldEntry)) {
      // Strip the collision flag; FindFreeEntry() re-derives chains.
      oldEntry->mKeyHash &= ~kCollisionFlag;
      PLDHashEntryHdr* newEntry = FindFreeEntry(oldEntry->mKeyHash);
      NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)");
      moveEntry(this, oldEntry, newEntry);
      newEntry->mKeyHash = oldEntry->mKeyHash;
    }
    oldEntryAddr += mEntrySize;
  }

  free(oldEntryStore);
  return true;
}
495
// Turn the client's hash of |aKey| into the stored mKeyHash form: scrambled
// by the golden-ratio multiplier (so the high bits used by Hash1() are well
// mixed), never 0 or 1, and with the collision flag clear.
MOZ_ALWAYS_INLINE PLDHashNumber
PLDHashTable::ComputeKeyHash(const void* aKey)
{
  MOZ_ASSERT(mEntryStore.Get());

  PLDHashNumber keyHash = mOps->hashKey(aKey);
  keyHash *= kGoldenRatio;

  // Avoid 0 and 1 hash codes, they indicate free and removed entries.
  // Unsigned subtraction wraps 0 and 1 around to large values.
  if (keyHash < 2) {
    keyHash -= 2;
  }
  keyHash &= ~kCollisionFlag;

  return keyHash;
}
512
513 PLDHashEntryHdr*
Search(const void * aKey)514 PLDHashTable::Search(const void* aKey)
515 {
516 #ifdef DEBUG
517 AutoReadOp op(mChecker);
518 #endif
519
520 PLDHashEntryHdr* entry = mEntryStore.Get()
521 ? SearchTable<ForSearchOrRemove>(aKey,
522 ComputeKeyHash(aKey))
523 : nullptr;
524 return entry;
525 }
526
527 PLDHashEntryHdr*
Add(const void * aKey,const mozilla::fallible_t &)528 PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
529 {
530 #ifdef DEBUG
531 AutoWriteOp op(mChecker);
532 #endif
533
534 // Allocate the entry storage if it hasn't already been allocated.
535 if (!mEntryStore.Get()) {
536 uint32_t nbytes;
537 // We already checked this in the constructor, so it must still be true.
538 MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
539 &nbytes));
540 mEntryStore.Set((char*)malloc(nbytes));
541 if (!mEntryStore.Get()) {
542 return nullptr;
543 }
544 memset(mEntryStore.Get(), 0, nbytes);
545 }
546
547 // If alpha is >= .75, grow or compress the table. If aKey is already in the
548 // table, we may grow once more than necessary, but only if we are on the
549 // edge of being overloaded.
550 uint32_t capacity = Capacity();
551 if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
552 // Compress if a quarter or more of all entries are removed.
553 int deltaLog2;
554 if (mRemovedCount >= capacity >> 2) {
555 deltaLog2 = 0;
556 } else {
557 deltaLog2 = 1;
558 }
559
560 // Grow or compress the table. If ChangeTable() fails, allow overloading up
561 // to the secondary max. Once we hit the secondary max, return null.
562 if (!ChangeTable(deltaLog2) &&
563 mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
564 return nullptr;
565 }
566 }
567
568 // Look for entry after possibly growing, so we don't have to add it,
569 // then skip it while growing the table and re-add it after.
570 PLDHashNumber keyHash = ComputeKeyHash(aKey);
571 PLDHashEntryHdr* entry = SearchTable<ForAdd>(aKey, keyHash);
572 if (!EntryIsLive(entry)) {
573 // Initialize the entry, indicating that it's no longer free.
574 if (EntryIsRemoved(entry)) {
575 mRemovedCount--;
576 keyHash |= kCollisionFlag;
577 }
578 if (mOps->initEntry) {
579 mOps->initEntry(entry, aKey);
580 }
581 entry->mKeyHash = keyHash;
582 mEntryCount++;
583 }
584
585 return entry;
586 }
587
588 PLDHashEntryHdr*
Add(const void * aKey)589 PLDHashTable::Add(const void* aKey)
590 {
591 PLDHashEntryHdr* entry = Add(aKey, fallible);
592 if (!entry) {
593 if (!mEntryStore.Get()) {
594 // We OOM'd while allocating the initial entry storage.
595 uint32_t nbytes;
596 (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
597 NS_ABORT_OOM(nbytes);
598 } else {
599 // We failed to resize the existing entry storage, either due to OOM or
600 // because we exceeded the maximum table capacity or size; report it as
601 // an OOM. The multiplication by 2 gets us the size we tried to allocate,
602 // which is double the current size.
603 NS_ABORT_OOM(2 * EntrySize() * EntryCount());
604 }
605 }
606 return entry;
607 }
608
609 void
Remove(const void * aKey)610 PLDHashTable::Remove(const void* aKey)
611 {
612 #ifdef DEBUG
613 AutoWriteOp op(mChecker);
614 #endif
615
616 PLDHashEntryHdr* entry = mEntryStore.Get()
617 ? SearchTable<ForSearchOrRemove>(aKey,
618 ComputeKeyHash(aKey))
619 : nullptr;
620 if (entry) {
621 RawRemove(entry);
622 ShrinkIfAppropriate();
623 }
624 }
625
// Remove a known entry (obtained from Search() or iteration), shrinking the
// table afterwards when appropriate.
void
PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry)
{
#ifdef DEBUG
  AutoWriteOp op(mChecker);
#endif

  RawRemove(aEntry);
  ShrinkIfAppropriate();
}
636
// Remove |aEntry| without shrinking the table. The slot becomes a
// removed-entry sentinel if it sits on a collision chain (so later probes
// still find entries past it), or simply free otherwise.
void
PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
{
  // Unfortunately, we can only do weak checking here. That's because
  // RawRemove() can be called legitimately while an Enumerate() call is
  // active, which doesn't fit well into how Checker's mState variable works.
  MOZ_ASSERT(mChecker.IsWritable());

  MOZ_ASSERT(mEntryStore.Get());

  MOZ_ASSERT(EntryIsLive(aEntry), "EntryIsLive(aEntry)");

  // Load keyHash first in case clearEntry() goofs it.
  PLDHashNumber keyHash = aEntry->mKeyHash;
  mOps->clearEntry(this, aEntry);
  if (keyHash & kCollisionFlag) {
    // On a collision chain: must leave a sentinel behind.
    MarkEntryRemoved(aEntry);
    mRemovedCount++;
  } else {
    MarkEntryFree(aEntry);
  }
  mEntryCount--;
}
660
// Shrink or compress if a quarter or more of all entries are removed, or if the
// table is underloaded according to the minimum alpha, and is not minimal-size
// already.
void
PLDHashTable::ShrinkIfAppropriate()
{
  uint32_t capacity = Capacity();
  if (mRemovedCount >= capacity >> 2 ||
      (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
    // Recompute the best size for the current live-entry count.
    uint32_t log2;
    BestCapacity(mEntryCount, &capacity, &log2);

    int32_t deltaLog2 = log2 - (kHashBits - mHashShift);
    MOZ_ASSERT(deltaLog2 <= 0);

    // Failure is benign here: the table merely stays at its current size.
    (void) ChangeTable(deltaLog2);
  }
}
679
// Measure the heap memory used by the entry store only; entries that own
// further heap allocations are not followed.
size_t
PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
#ifdef DEBUG
  AutoReadOp op(mChecker);
#endif

  return aMallocSizeOf(mEntryStore.Get());
}
689
690 size_t
ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const691 PLDHashTable::ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
692 {
693 return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
694 }
695
// Move-construct an iterator, transferring the in-progress position and the
// table's read-op bracket to the new object. The source is nulled out so its
// destructor does nothing.
PLDHashTable::Iterator::Iterator(Iterator&& aOther)
  : mTable(aOther.mTable)
  , mStart(aOther.mStart)
  , mLimit(aOther.mLimit)
  , mCurrent(aOther.mCurrent)
  , mNexts(aOther.mNexts)
  , mNextsLimit(aOther.mNextsLimit)
  , mHaveRemoved(aOther.mHaveRemoved)
{
  // No need to change |mChecker| here.
  aOther.mTable = nullptr;
  aOther.mStart = nullptr;
  aOther.mLimit = nullptr;
  aOther.mCurrent = nullptr;
  aOther.mNexts = 0;
  aOther.mNextsLimit = 0;
  aOther.mHaveRemoved = false;
}
714
// Begin iterating |aTable|, positioned on the first live entry (if any).
// Holds a DEBUG read-op bracket on the table until destruction, so the table
// must not be mutated (other than via Iterator::Remove()) while an iterator
// is alive.
PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
  : mTable(aTable)
  , mStart(mTable->mEntryStore.Get())
  , mLimit(mTable->mEntryStore.Get() + mTable->Capacity() * mTable->mEntrySize)
  , mCurrent(mTable->mEntryStore.Get())
  , mNexts(0)
  , mNextsLimit(mTable->EntryCount())
  , mHaveRemoved(false)
{
#ifdef DEBUG
  mTable->mChecker.StartReadOp();
#endif

  if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
      mTable->Capacity() > 0) {
    // Start iterating at a random entry. It would be even more chaotic to
    // iterate in fully random order, but that's harder.
    mCurrent += ChaosMode::randomUint32LessThan(mTable->Capacity()) *
                mTable->mEntrySize;
  }

  // Advance to the first live entry, if there is one.
  // (Done() is true for an empty table, whose store may also be null.)
  if (!Done()) {
    while (IsOnNonLiveEntry()) {
      MoveToNextEntry();
    }
  }
}
743
// End iteration: perform the shrink deferred by any Remove() calls, and
// release the DEBUG read-op bracket. A moved-from iterator (null mTable)
// does nothing.
PLDHashTable::Iterator::~Iterator()
{
  if (mTable) {
    if (mHaveRemoved) {
      mTable->ShrinkIfAppropriate();
    }
#ifdef DEBUG
    mTable->mChecker.EndReadOp();
#endif
  }
}
755
756 MOZ_ALWAYS_INLINE bool
IsOnNonLiveEntry() const757 PLDHashTable::Iterator::IsOnNonLiveEntry() const
758 {
759 MOZ_ASSERT(!Done());
760 return !EntryIsLive(reinterpret_cast<PLDHashEntryHdr*>(mCurrent));
761 }
762
763 MOZ_ALWAYS_INLINE void
MoveToNextEntry()764 PLDHashTable::Iterator::MoveToNextEntry()
765 {
766 mCurrent += mTable->mEntrySize;
767 if (mCurrent == mLimit) {
768 mCurrent = mStart; // Wrap-around. Possible due to Chaos Mode.
769 }
770 }
771
// Advance to the next live entry. |mNexts| counts live entries already
// visited; once it reaches the table's entry count, Done() becomes true and
// no further scanning happens.
void
PLDHashTable::Iterator::Next()
{
  MOZ_ASSERT(!Done());

  mNexts++;

  // Advance to the next live entry, if there is one.
  if (!Done()) {
    do {
      MoveToNextEntry();
    } while (IsOnNonLiveEntry());
  }
}
786
// Remove the entry the iterator is currently positioned on. The table is not
// shrunk here; that is deferred to the iterator's destructor (via
// |mHaveRemoved|) so iteration remains valid.
void
PLDHashTable::Iterator::Remove()
{
  mTable->RawRemove(Get());
  mHaveRemoved = true;
}
794
#ifdef DEBUG
// Freeze the table: any subsequent write operation will assert in Checker.
// Debug-only; has no counterpart (and no effect) in release builds.
void
PLDHashTable::MarkImmutable()
{
  mChecker.SetNonWritable();
}
#endif
802