1 /* 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights 3 * reserved. 4 * Copyright (C) 2008 David Levin <levin@chromium.org> 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Library General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU Library General Public License 17 * along with this library; see the file COPYING.LIB. If not, write to 18 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, 19 * Boston, MA 02110-1301, USA. 20 * 21 */ 22 23 #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_TABLE_H_ 24 #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_TABLE_H_ 25 26 #include <memory> 27 28 #include "base/bits.h" 29 #include "base/numerics/checked_math.h" 30 #include "third_party/blink/renderer/platform/wtf/allocator/allocator.h" 31 #include "third_party/blink/renderer/platform/wtf/allocator/partition_allocator.h" 32 #include "third_party/blink/renderer/platform/wtf/assertions.h" 33 #include "third_party/blink/renderer/platform/wtf/conditional_destructor.h" 34 #include "third_party/blink/renderer/platform/wtf/construct_traits.h" 35 #include "third_party/blink/renderer/platform/wtf/hash_traits.h" 36 37 #if !defined(DUMP_HASHTABLE_STATS) 38 #define DUMP_HASHTABLE_STATS 0 39 #endif 40 41 #if !defined(DUMP_HASHTABLE_STATS_PER_TABLE) 42 #define DUMP_HASHTABLE_STATS_PER_TABLE 0 43 #endif 44 45 #if DUMP_HASHTABLE_STATS 46 #include "third_party/blink/renderer/platform/wtf/threading.h" 47 #endif 48 49 #if DUMP_HASHTABLE_STATS_PER_TABLE 50 #include 
<type_traits> 51 #include "third_party/blink/renderer/platform/wtf/DataLog.h" 52 #endif 53 54 #if DUMP_HASHTABLE_STATS 55 #if DUMP_HASHTABLE_STATS_PER_TABLE 56 57 #define UPDATE_PROBE_COUNTS() \ 58 ++probeCount; \ 59 HashTableStats::instance().recordCollisionAtCount(probeCount); \ 60 ++perTableProbeCount; \ 61 stats_->recordCollisionAtCount(perTableProbeCount) 62 #define UPDATE_ACCESS_COUNTS() \ 63 HashTableStats::instance().numAccesses.fetch_add(1, \ 64 std::memory_order_relaxed); \ 65 int probeCount = 0; \ 66 stats_->numAccesses.fetch_add(1, std::memory_order_relaxed); \ 67 int perTableProbeCount = 0 68 #else 69 #define UPDATE_PROBE_COUNTS() \ 70 ++probeCount; \ 71 HashTableStats::instance().recordCollisionAtCount(probeCount) 72 #define UPDATE_ACCESS_COUNTS() \ 73 HashTableStats::instance().numAccesses.fetch_add(1, \ 74 std::memory_order_relaxed); \ 75 int probeCount = 0 76 #endif 77 #else 78 #if DUMP_HASHTABLE_STATS_PER_TABLE 79 #define UPDATE_PROBE_COUNTS() \ 80 ++perTableProbeCount; \ 81 stats_->recordCollisionAtCount(perTableProbeCount) 82 #define UPDATE_ACCESS_COUNTS() \ 83 stats_->numAccesses.fetch_add(1, std::memory_order_relaxed); \ 84 int perTableProbeCount = 0 85 #else 86 #define UPDATE_PROBE_COUNTS() \ 87 do { \ 88 } while (0) 89 #define UPDATE_ACCESS_COUNTS() \ 90 do { \ 91 } while (0) 92 #endif 93 #endif 94 95 namespace WTF { 96 97 // This is for tracing inside collections that have special support for weak 98 // pointers. 99 // 100 // Structure: 101 // - |Trace|: Traces the contents and returns true if there are still unmarked 102 // objects left to process 103 // 104 // Default implementation for non-weak types is to use the regular non-weak 105 // TraceTrait. Default implementation for types with weakness is to 106 // call |TraceInCollection| on the type's trait. 
107 template <WeakHandlingFlag weakness, typename T, typename Traits> 108 struct TraceInCollectionTrait; 109 110 #if DUMP_HASHTABLE_STATS || DUMP_HASHTABLE_STATS_PER_TABLE 111 struct WTF_EXPORT HashTableStats { HashTableStatsHashTableStats112 HashTableStats() 113 : numAccesses(0), 114 numRehashes(0), 115 numRemoves(0), 116 numReinserts(0), 117 maxCollisions(0), 118 numCollisions(0), 119 collisionGraph() {} 120 121 // The following variables are all atomically incremented when modified. 122 std::atomic_int numAccesses; 123 std::atomic_int numRehashes; 124 std::atomic_int numRemoves; 125 std::atomic_int numReinserts; 126 127 // The following variables are only modified in the recordCollisionAtCount 128 // method within a mutex. 129 int maxCollisions; 130 int numCollisions; 131 int collisionGraph[4096]; 132 133 void copy(const HashTableStats* other); 134 void recordCollisionAtCount(int count); 135 void DumpStats(); 136 137 static HashTableStats& instance(); 138 139 template <typename VisitorDispatcher> traceHashTableStats140 void trace(VisitorDispatcher) const {} 141 142 private: 143 void RecordCollisionAtCountWithoutLock(int count); 144 void DumpStatsWithoutLock(); 145 }; 146 147 #if DUMP_HASHTABLE_STATS_PER_TABLE 148 template <typename Allocator, bool isGCType = Allocator::kIsGarbageCollected> 149 class HashTableStatsPtr; 150 151 template <typename Allocator> 152 class HashTableStatsPtr<Allocator, false> final { 153 STATIC_ONLY(HashTableStatsPtr); 154 155 public: Create()156 static std::unique_ptr<HashTableStats> Create() { 157 return std::make_unique<HashTableStats>(); 158 } 159 copy(const std::unique_ptr<HashTableStats> & other)160 static std::unique_ptr<HashTableStats> copy( 161 const std::unique_ptr<HashTableStats>& other) { 162 if (!other) 163 return nullptr; 164 return std::make_unique<HashTableStats>(*other); 165 } 166 swap(std::unique_ptr<HashTableStats> & stats,std::unique_ptr<HashTableStats> & other)167 static void swap(std::unique_ptr<HashTableStats>& 
stats, 168 std::unique_ptr<HashTableStats>& other) { 169 stats.swap(other); 170 } 171 }; 172 173 template <typename Allocator> 174 class HashTableStatsPtr<Allocator, true> final { 175 STATIC_ONLY(HashTableStatsPtr); 176 177 public: Create()178 static HashTableStats* Create() { 179 // TODO(cavalcantii): fix this. 180 return new HashTableStats; 181 } 182 copy(const HashTableStats * other)183 static HashTableStats* copy(const HashTableStats* other) { 184 if (!other) 185 return nullptr; 186 HashTableStats* obj = Create(); 187 obj->copy(other); 188 return obj; 189 } 190 swap(HashTableStats * & stats,HashTableStats * & other)191 static void swap(HashTableStats*& stats, HashTableStats*& other) { 192 std::swap(stats, other); 193 } 194 }; 195 #endif 196 #endif 197 198 template <typename Key, 199 typename Value, 200 typename Extractor, 201 typename HashFunctions, 202 typename Traits, 203 typename KeyTraits, 204 typename Allocator> 205 class HashTable; 206 template <typename Key, 207 typename Value, 208 typename Extractor, 209 typename HashFunctions, 210 typename Traits, 211 typename KeyTraits, 212 typename Allocator> 213 class HashTableIterator; 214 template <typename Key, 215 typename Value, 216 typename Extractor, 217 typename HashFunctions, 218 typename Traits, 219 typename KeyTraits, 220 typename Allocator> 221 class HashTableConstIterator; 222 template <typename Value, 223 typename HashFunctions, 224 typename HashTraits, 225 typename Allocator> 226 class LinkedHashSet; 227 template <WeakHandlingFlag x, 228 typename T, 229 typename U, 230 typename V, 231 typename W, 232 typename X, 233 typename Y, 234 typename Z> 235 struct WeakProcessingHashTableHelper; 236 237 typedef enum { kHashItemKnownGood } HashItemKnownGoodTag; 238 239 template <typename Key, 240 typename Value, 241 typename Extractor, 242 typename HashFunctions, 243 typename Traits, 244 typename KeyTraits, 245 typename Allocator> 246 class HashTableConstIterator final { 247 DISALLOW_NEW(); 248 249 private: 250 
typedef HashTable<Key, 251 Value, 252 Extractor, 253 HashFunctions, 254 Traits, 255 KeyTraits, 256 Allocator> 257 HashTableType; 258 typedef HashTableIterator<Key, 259 Value, 260 Extractor, 261 HashFunctions, 262 Traits, 263 KeyTraits, 264 Allocator> 265 iterator; 266 typedef HashTableConstIterator<Key, 267 Value, 268 Extractor, 269 HashFunctions, 270 Traits, 271 KeyTraits, 272 Allocator> 273 const_iterator; 274 typedef Value ValueType; 275 using value_type = ValueType; 276 typedef typename Traits::IteratorConstGetType GetType; 277 typedef const ValueType* PointerType; 278 279 friend class HashTable<Key, 280 Value, 281 Extractor, 282 HashFunctions, 283 Traits, 284 KeyTraits, 285 Allocator>; 286 friend class HashTableIterator<Key, 287 Value, 288 Extractor, 289 HashFunctions, 290 Traits, 291 KeyTraits, 292 Allocator>; 293 SkipEmptyBuckets()294 void SkipEmptyBuckets() { 295 while (position_ != end_position_ && 296 HashTableType::IsEmptyOrDeletedBucket(*position_)) 297 ++position_; 298 } 299 ReverseSkipEmptyBuckets()300 void ReverseSkipEmptyBuckets() { 301 // Don't need to check for out-of-bounds positions, as begin position is 302 // always going to be a non-empty bucket. 
303 while (HashTableType::IsEmptyOrDeletedBucket(*position_)) { 304 #if DCHECK_IS_ON() 305 DCHECK_NE(position_, begin_position_); 306 #endif 307 --position_; 308 } 309 } 310 HashTableConstIterator(PointerType position,PointerType begin_position,PointerType end_position,const HashTableType * container)311 HashTableConstIterator(PointerType position, 312 PointerType begin_position, 313 PointerType end_position, 314 const HashTableType* container) 315 : position_(position), 316 end_position_(end_position) 317 #if DCHECK_IS_ON() 318 , 319 begin_position_(begin_position), 320 container_(container), 321 container_modifications_(container->Modifications()) 322 #endif 323 { 324 SkipEmptyBuckets(); 325 } 326 HashTableConstIterator(PointerType position,PointerType begin_position,PointerType end_position,const HashTableType * container,HashItemKnownGoodTag)327 HashTableConstIterator(PointerType position, 328 PointerType begin_position, 329 PointerType end_position, 330 const HashTableType* container, 331 HashItemKnownGoodTag) 332 : position_(position), 333 end_position_(end_position) 334 #if DCHECK_IS_ON() 335 , 336 begin_position_(begin_position), 337 container_(container), 338 container_modifications_(container->Modifications()) 339 #endif 340 { 341 #if DCHECK_IS_ON() 342 DCHECK_EQ(container_modifications_, container_->Modifications()); 343 #endif 344 } 345 CheckModifications()346 void CheckModifications() const { 347 #if DCHECK_IS_ON() 348 // HashTable and collections that build on it do not support 349 // modifications while there is an iterator in use. The exception is 350 // ListHashSet, which has its own iterators that tolerate modification 351 // of the underlying set. 
352 DCHECK_EQ(container_modifications_, container_->Modifications()); 353 DCHECK(!container_->AccessForbidden()); 354 #endif 355 } 356 357 public: 358 HashTableConstIterator() = default; 359 Get()360 GetType Get() const { 361 CheckModifications(); 362 return position_; 363 } 364 typename Traits::IteratorConstReferenceType operator*() const { 365 return Traits::GetToReferenceConstConversion(Get()); 366 } 367 GetType operator->() const { return Get(); } 368 369 const_iterator& operator++() { 370 DCHECK_NE(position_, end_position_); 371 CheckModifications(); 372 ++position_; 373 SkipEmptyBuckets(); 374 return *this; 375 } 376 377 // postfix ++ intentionally omitted 378 379 const_iterator& operator--() { 380 #if DCHECK_IS_ON() 381 DCHECK_NE(position_, begin_position_); 382 #endif 383 CheckModifications(); 384 --position_; 385 ReverseSkipEmptyBuckets(); 386 return *this; 387 } 388 389 // postfix -- intentionally omitted 390 391 // Comparison. 392 bool operator==(const const_iterator& other) const { 393 return position_ == other.position_; 394 } 395 bool operator!=(const const_iterator& other) const { 396 return position_ != other.position_; 397 } 398 bool operator==(const iterator& other) const { 399 return *this == static_cast<const_iterator>(other); 400 } 401 bool operator!=(const iterator& other) const { 402 return *this != static_cast<const_iterator>(other); 403 } 404 PrintTo(std::ostream & stream)405 std::ostream& PrintTo(std::ostream& stream) const { 406 if (position_ == end_position_) 407 return stream << "iterator representing <end>"; 408 // TODO(tkent): Change |position_| to |*position_| to show the 409 // pointed object. It requires a lot of new stream printer functions. 
410 return stream << "iterator pointing to " << position_; 411 } 412 413 private: 414 PointerType position_; 415 PointerType end_position_; 416 #if DCHECK_IS_ON() 417 PointerType begin_position_; 418 const HashTableType* container_; 419 int64_t container_modifications_; 420 #endif 421 }; 422 423 template <typename Key, 424 typename Value, 425 typename Extractor, 426 typename Hash, 427 typename Traits, 428 typename KeyTraits, 429 typename Allocator> 430 std::ostream& operator<<(std::ostream& stream, 431 const HashTableConstIterator<Key, 432 Value, 433 Extractor, 434 Hash, 435 Traits, 436 KeyTraits, 437 Allocator>& iterator) { 438 return iterator.PrintTo(stream); 439 } 440 441 template <typename Key, 442 typename Value, 443 typename Extractor, 444 typename HashFunctions, 445 typename Traits, 446 typename KeyTraits, 447 typename Allocator> 448 class HashTableIterator final { 449 DISALLOW_NEW(); 450 451 private: 452 typedef HashTable<Key, 453 Value, 454 Extractor, 455 HashFunctions, 456 Traits, 457 KeyTraits, 458 Allocator> 459 HashTableType; 460 typedef HashTableIterator<Key, 461 Value, 462 Extractor, 463 HashFunctions, 464 Traits, 465 KeyTraits, 466 Allocator> 467 iterator; 468 typedef HashTableConstIterator<Key, 469 Value, 470 Extractor, 471 HashFunctions, 472 Traits, 473 KeyTraits, 474 Allocator> 475 const_iterator; 476 typedef Value ValueType; 477 typedef typename Traits::IteratorGetType GetType; 478 typedef ValueType* PointerType; 479 480 friend class HashTable<Key, 481 Value, 482 Extractor, 483 HashFunctions, 484 Traits, 485 KeyTraits, 486 Allocator>; 487 HashTableIterator(PointerType pos,PointerType begin,PointerType end,const HashTableType * container)488 HashTableIterator(PointerType pos, 489 PointerType begin, 490 PointerType end, 491 const HashTableType* container) 492 : iterator_(pos, begin, end, container) {} HashTableIterator(PointerType pos,PointerType begin,PointerType end,const HashTableType * container,HashItemKnownGoodTag tag)493 
HashTableIterator(PointerType pos, 494 PointerType begin, 495 PointerType end, 496 const HashTableType* container, 497 HashItemKnownGoodTag tag) 498 : iterator_(pos, begin, end, container, tag) {} 499 500 public: 501 HashTableIterator() = default; 502 503 // default copy, assignment and destructor are OK 504 Get()505 GetType Get() const { return const_cast<GetType>(iterator_.Get()); } 506 typename Traits::IteratorReferenceType operator*() const { 507 return Traits::GetToReferenceConversion(Get()); 508 } 509 GetType operator->() const { return Get(); } 510 511 iterator& operator++() { 512 ++iterator_; 513 return *this; 514 } 515 516 // postfix ++ intentionally omitted 517 518 iterator& operator--() { 519 --iterator_; 520 return *this; 521 } 522 523 // postfix -- intentionally omitted 524 525 // Comparison. 526 bool operator==(const iterator& other) const { 527 return iterator_ == other.iterator_; 528 } 529 bool operator!=(const iterator& other) const { 530 return iterator_ != other.iterator_; 531 } 532 bool operator==(const const_iterator& other) const { 533 return iterator_ == other; 534 } 535 bool operator!=(const const_iterator& other) const { 536 return iterator_ != other; 537 } 538 const_iterator()539 operator const_iterator() const { return iterator_; } PrintTo(std::ostream & stream)540 std::ostream& PrintTo(std::ostream& stream) const { 541 return iterator_.PrintTo(stream); 542 } 543 544 private: 545 const_iterator iterator_; 546 }; 547 548 template <typename Key, 549 typename Value, 550 typename Extractor, 551 typename Hash, 552 typename Traits, 553 typename KeyTraits, 554 typename Allocator> 555 std::ostream& operator<<(std::ostream& stream, 556 const HashTableIterator<Key, 557 Value, 558 Extractor, 559 Hash, 560 Traits, 561 KeyTraits, 562 Allocator>& iterator) { 563 return iterator.PrintTo(stream); 564 } 565 566 using std::swap; 567 568 template <typename T, 569 typename Allocator, 570 typename Traits, 571 bool enterGCForbiddenScope> 572 struct Mover { 573 
STATIC_ONLY(Mover); MoveMover574 static void Move(T&& from, T& to) { 575 to.~T(); 576 new (NotNull, &to) T(std::move(from)); 577 } 578 }; 579 580 template <typename T, typename Allocator, typename Traits> 581 struct Mover<T, Allocator, Traits, true> { 582 STATIC_ONLY(Mover); 583 static void Move(T&& from, T& to) { 584 Allocator::EnterGCForbiddenScope(); 585 to.~T(); 586 new (NotNull, &to) T(std::move(from)); 587 Allocator::LeaveGCForbiddenScope(); 588 } 589 }; 590 591 template <typename HashFunctions, typename Traits, typename Allocator> 592 class IdentityHashTranslator { 593 STATIC_ONLY(IdentityHashTranslator); 594 595 public: 596 template <typename T> 597 static unsigned GetHash(const T& key) { 598 return HashFunctions::GetHash(key); 599 } 600 template <typename T, typename U> 601 static bool Equal(const T& a, const U& b) { 602 return HashFunctions::Equal(a, b); 603 } 604 template <typename T, typename U, typename V> 605 static void Translate(T& location, U&&, V&& value) { 606 location = std::forward<V>(value); 607 } 608 }; 609 610 template <typename HashTableType, typename ValueType> 611 struct HashTableAddResult final { 612 STACK_ALLOCATED(); 613 614 public: 615 HashTableAddResult(const HashTableType* container, 616 ValueType* stored_value, 617 bool is_new_entry) 618 : stored_value(stored_value), 619 is_new_entry(is_new_entry) 620 #if ENABLE_SECURITY_ASSERT 621 , 622 container_(container), 623 container_modifications_(container->Modifications()) 624 #endif 625 { 626 ALLOW_UNUSED_LOCAL(container); 627 DCHECK(container); 628 } 629 630 ValueType* stored_value; 631 bool is_new_entry; 632 633 #if ENABLE_SECURITY_ASSERT 634 ~HashTableAddResult() { 635 // If rehash happened before accessing storedValue, it's 636 // use-after-free. Any modification may cause a rehash, so we check for 637 // modifications here. 638 639 // Rehash after accessing storedValue is harmless but will assert if the 640 // AddResult destructor takes place after a modification. 
You may need 641 // to limit the scope of the AddResult. 642 SECURITY_DCHECK(container_modifications_ == container_->Modifications()); 643 } 644 645 private: 646 const HashTableType* container_; 647 const int64_t container_modifications_; 648 #endif 649 }; 650 651 template <typename Value, typename Extractor, typename KeyTraits> 652 struct HashTableHelper { 653 template <typename T> 654 struct AddConstToPtrType { 655 using type = T; 656 }; 657 template <typename T> 658 struct AddConstToPtrType<T*> { 659 using type = const T*; 660 }; 661 662 using Key = typename AddConstToPtrType<typename KeyTraits::TraitType>::type; 663 664 STATIC_ONLY(HashTableHelper); 665 static bool IsEmptyBucket(const Key& key) { 666 return IsHashTraitsEmptyValue<KeyTraits>(key); 667 } 668 static bool IsDeletedBucket(const Key& key) { 669 return KeyTraits::IsDeletedValue(key); 670 } 671 static bool IsEmptyOrDeletedBucket(const Value& value) { 672 const Key& key = Extractor::Extract(value); 673 return IsEmptyBucket(key) || IsDeletedBucket(key); 674 } 675 static constexpr size_t constexpr_max(size_t a, size_t b) { return a > b ? a : b; } 676 static bool IsEmptyOrDeletedBucketSafe(const Value& value) { 677 alignas(constexpr_max(alignof(Key), sizeof(size_t))) char buf[sizeof(Key)]; 678 const Key& key = Extractor::ExtractSafe(value, &buf); 679 return IsEmptyBucket(key) || IsDeletedBucket(key); 680 } 681 }; 682 683 template <typename HashTranslator, 684 typename KeyTraits, 685 bool safeToCompareToEmptyOrDeleted> 686 struct HashTableKeyChecker { 687 STATIC_ONLY(HashTableKeyChecker); 688 // There's no simple generic way to make this check if 689 // safeToCompareToEmptyOrDeleted is false, so the check always passes. 
690 template <typename T> 691 static bool CheckKey(const T&) { 692 return true; 693 } 694 }; 695 696 template <typename HashTranslator, typename KeyTraits> 697 struct HashTableKeyChecker<HashTranslator, KeyTraits, true> { 698 STATIC_ONLY(HashTableKeyChecker); 699 template <typename T> 700 static bool CheckKey(const T& key) { 701 // FIXME : Check also equality to the deleted value. 702 return !HashTranslator::Equal(KeyTraits::EmptyValue(), key); 703 } 704 }; 705 706 // Note: empty or deleted key values are not allowed, using them may lead to 707 // undefined behavior. For pointer keys this means that null pointers are not 708 // allowed unless you supply custom key traits. 709 template <typename Key, 710 typename Value, 711 typename Extractor, 712 typename HashFunctions, 713 typename Traits, 714 typename KeyTraits, 715 typename Allocator> 716 class HashTable final 717 : public ConditionalDestructor<HashTable<Key, 718 Value, 719 Extractor, 720 HashFunctions, 721 Traits, 722 KeyTraits, 723 Allocator>, 724 Allocator::kIsGarbageCollected> { 725 DISALLOW_NEW(); 726 727 public: 728 typedef HashTableIterator<Key, 729 Value, 730 Extractor, 731 HashFunctions, 732 Traits, 733 KeyTraits, 734 Allocator> 735 iterator; 736 typedef HashTableConstIterator<Key, 737 Value, 738 Extractor, 739 HashFunctions, 740 Traits, 741 KeyTraits, 742 Allocator> 743 const_iterator; 744 typedef Traits ValueTraits; 745 typedef Key KeyType; 746 typedef typename KeyTraits::PeekInType KeyPeekInType; 747 typedef Value ValueType; 748 typedef Extractor ExtractorType; 749 typedef KeyTraits KeyTraitsType; 750 typedef IdentityHashTranslator<HashFunctions, Traits, Allocator> 751 IdentityTranslatorType; 752 typedef HashTableAddResult<HashTable, ValueType> AddResult; 753 754 HashTable(); 755 756 void Finalize() { 757 static_assert(!Allocator::kIsGarbageCollected, 758 "GCed collections can't be finalized."); 759 if (LIKELY(!table_)) 760 return; 761 EnterAccessForbiddenScope(); 762 
DeleteAllBucketsAndDeallocate(table_, table_size_); 763 LeaveAccessForbiddenScope(); 764 table_ = nullptr; 765 } 766 767 HashTable(const HashTable&); 768 HashTable(HashTable&&); 769 void swap(HashTable&); 770 HashTable& operator=(const HashTable&); 771 HashTable& operator=(HashTable&&); 772 773 // When the hash table is empty, just return the same iterator for end as 774 // for begin. This is more efficient because we don't have to skip all the 775 // empty and deleted buckets, and iterating an empty table is a common case 776 // that's worth optimizing. 777 iterator begin() { return IsEmpty() ? end() : MakeIterator(table_); } 778 iterator end() { return MakeKnownGoodIterator(table_ + table_size_); } 779 const_iterator begin() const { 780 return IsEmpty() ? end() : MakeConstIterator(table_); 781 } 782 const_iterator end() const { 783 return MakeKnownGoodConstIterator(table_ + table_size_); 784 } 785 786 unsigned size() const { 787 DCHECK(!AccessForbidden()); 788 return key_count_; 789 } 790 unsigned Capacity() const { 791 DCHECK(!AccessForbidden()); 792 return table_size_; 793 } 794 bool IsEmpty() const { 795 DCHECK(!AccessForbidden()); 796 return !key_count_; 797 } 798 799 void ReserveCapacityForSize(unsigned size); 800 801 template <typename IncomingValueType> 802 AddResult insert(IncomingValueType&& value) { 803 return insert<IdentityTranslatorType>( 804 Extractor::Extract(value), std::forward<IncomingValueType>(value)); 805 } 806 807 // A special version of insert() that finds the object by hashing and 808 // comparing with some other type, to avoid the cost of type conversion if the 809 // object is already in the table. 
810 template <typename HashTranslator, typename T, typename Extra> 811 AddResult insert(T&& key, Extra&&); 812 template <typename HashTranslator, typename T, typename Extra> 813 AddResult InsertPassingHashCode(T&& key, Extra&&); 814 815 iterator find(KeyPeekInType key) { return Find<IdentityTranslatorType>(key); } 816 const_iterator find(KeyPeekInType key) const { 817 return Find<IdentityTranslatorType>(key); 818 } 819 bool Contains(KeyPeekInType key) const { 820 return Contains<IdentityTranslatorType>(key); 821 } 822 823 template <typename HashTranslator, typename T> 824 iterator Find(const T&); 825 template <typename HashTranslator, typename T> 826 const_iterator Find(const T&) const; 827 template <typename HashTranslator, typename T> 828 bool Contains(const T&) const; 829 830 void erase(KeyPeekInType); 831 void erase(iterator); 832 void erase(const_iterator); 833 void clear(); 834 835 static bool IsEmptyBucket(const ValueType& value) { 836 return IsHashTraitsEmptyValue<KeyTraits>(Extractor::Extract(value)); 837 } 838 static bool IsDeletedBucket(const ValueType& value) { 839 return KeyTraits::IsDeletedValue(Extractor::Extract(value)); 840 } 841 static bool IsEmptyOrDeletedBucket(const ValueType& value) { 842 return HashTableHelper<ValueType, Extractor, 843 KeyTraits>::IsEmptyOrDeletedBucket(value); 844 } 845 846 ValueType* Lookup(KeyPeekInType key) { 847 return Lookup<IdentityTranslatorType, KeyPeekInType>(key); 848 } 849 const ValueType* Lookup(KeyPeekInType key) const { 850 return Lookup<IdentityTranslatorType, KeyPeekInType>(key); 851 } 852 template <typename HashTranslator, typename T> 853 ValueType* Lookup(const T&); 854 template <typename HashTranslator, typename T> 855 const ValueType* Lookup(const T&) const; 856 857 ValueType** GetBufferSlot() { return &table_; } 858 859 template <typename VisitorDispatcher, typename A = Allocator> 860 std::enable_if_t<A::kIsGarbageCollected> Trace(VisitorDispatcher) const; 861 862 #if DCHECK_IS_ON() 863 void 
EnterAccessForbiddenScope() { 864 DCHECK(!access_forbidden_); 865 access_forbidden_ = true; 866 } 867 void LeaveAccessForbiddenScope() { access_forbidden_ = false; } 868 bool AccessForbidden() const { return access_forbidden_; } 869 int64_t Modifications() const { return modifications_; } 870 void RegisterModification() { modifications_++; } 871 // HashTable and collections that build on it do not support modifications 872 // while there is an iterator in use. The exception is ListHashSet, which 873 // has its own iterators that tolerate modification of the underlying set. 874 void CheckModifications(int64_t mods) const { 875 DCHECK_EQ(mods, modifications_); 876 } 877 #else 878 ALWAYS_INLINE void EnterAccessForbiddenScope() {} 879 ALWAYS_INLINE void LeaveAccessForbiddenScope() {} 880 ALWAYS_INLINE bool AccessForbidden() const { return false; } 881 ALWAYS_INLINE int64_t Modifications() const { return 0; } 882 ALWAYS_INLINE void RegisterModification() {} 883 ALWAYS_INLINE void CheckModifications(int64_t mods) const {} 884 #endif 885 886 private: 887 static ValueType* AllocateTable(unsigned size); 888 static void DeleteAllBucketsAndDeallocate(ValueType* table, unsigned size); 889 890 typedef std::pair<ValueType*, bool> LookupType; 891 typedef std::pair<LookupType, unsigned> FullLookupType; 892 893 LookupType LookupForWriting(const Key& key) { 894 return LookupForWriting<IdentityTranslatorType>(key); 895 } 896 template <typename HashTranslator, typename T> 897 FullLookupType FullLookupForWriting(const T&); 898 template <typename HashTranslator, typename T> 899 LookupType LookupForWriting(const T&); 900 901 void erase(const ValueType*); 902 903 bool ShouldExpand() const { 904 return (key_count_ + deleted_count_) * kMaxLoad >= table_size_; 905 } 906 bool MustRehashInPlace() const { 907 return key_count_ * kMinLoad < table_size_ * 2; 908 } 909 bool ShouldShrink() const { 910 // isAllocationAllowed check should be at the last because it's 911 // expensive. 
912 return key_count_ * kMinLoad < table_size_ && 913 table_size_ > KeyTraits::kMinimumTableSize && 914 Allocator::IsAllocationAllowed(); 915 } 916 ValueType* Expand(ValueType* entry = nullptr); 917 void Shrink() { Rehash(table_size_ / 2, nullptr); } 918 919 ValueType* ExpandBuffer(unsigned new_table_size, ValueType* entry, bool&); 920 ValueType* RehashTo(ValueType* new_table, 921 unsigned new_table_size, 922 ValueType* entry); 923 ValueType* Rehash(unsigned new_table_size, ValueType* entry); 924 ValueType* Reinsert(ValueType&&); 925 926 static void InitializeBucket(ValueType& bucket); 927 static void DeleteBucket(const ValueType& bucket) { 928 bucket.~ValueType(); 929 Traits::ConstructDeletedValue(const_cast<ValueType&>(bucket), 930 Allocator::kIsGarbageCollected); 931 } 932 933 FullLookupType MakeLookupResult(ValueType* position, 934 bool found, 935 unsigned hash) { 936 return FullLookupType(LookupType(position, found), hash); 937 } 938 939 iterator MakeIterator(ValueType* pos) { 940 return iterator(pos, table_, table_ + table_size_, this); 941 } 942 const_iterator MakeConstIterator(const ValueType* pos) const { 943 return const_iterator(pos, table_, table_ + table_size_, this); 944 } 945 iterator MakeKnownGoodIterator(ValueType* pos) { 946 return iterator(pos, table_, table_ + table_size_, this, 947 kHashItemKnownGood); 948 } 949 const_iterator MakeKnownGoodConstIterator(const ValueType* pos) const { 950 return const_iterator(pos, table_, table_ + table_size_, this, 951 kHashItemKnownGood); 952 } 953 954 static const unsigned kMaxLoad = 2; 955 static const unsigned kMinLoad = 6; 956 957 unsigned TableSizeMask() const { 958 unsigned mask = table_size_ - 1; 959 DCHECK_EQ((mask & table_size_), 0u); 960 return mask; 961 } 962 963 void SetEnqueued() { queue_flag_ = true; } 964 void ClearEnqueued() { queue_flag_ = false; } 965 bool Enqueued() { return queue_flag_; } 966 967 // Constructor for hash tables with raw storage. 
968 struct RawStorageTag {}; 969 HashTable(RawStorageTag, ValueType* table, unsigned size) 970 : table_(table), 971 table_size_(size), 972 key_count_(0), 973 deleted_count_(0), 974 queue_flag_(0) 975 #if DCHECK_IS_ON() 976 , 977 access_forbidden_(0), 978 modifications_(0) 979 #endif 980 { 981 } 982 983 ValueType* table_; 984 unsigned table_size_; 985 unsigned key_count_; 986 #if DCHECK_IS_ON() 987 unsigned deleted_count_ : 30; 988 unsigned queue_flag_ : 1; 989 unsigned access_forbidden_ : 1; 990 unsigned modifications_; 991 #else 992 unsigned deleted_count_ : 31; 993 unsigned queue_flag_ : 1; 994 #endif 995 996 #if DUMP_HASHTABLE_STATS_PER_TABLE 997 public: 998 mutable 999 typename std::conditional<Allocator::kIsGarbageCollected, 1000 HashTableStats*, 1001 std::unique_ptr<HashTableStats>>::type stats_; 1002 void DumpStats() { 1003 if (stats_) { 1004 stats_->DumpStats(); 1005 } 1006 } 1007 #endif 1008 1009 template <WeakHandlingFlag x, 1010 typename T, 1011 typename U, 1012 typename V, 1013 typename W, 1014 typename X, 1015 typename Y, 1016 typename Z> 1017 friend struct WeakProcessingHashTableHelper; 1018 template <typename T, typename U, typename V, typename W> 1019 friend class LinkedHashSet; 1020 }; 1021 1022 template <typename Key, 1023 typename Value, 1024 typename Extractor, 1025 typename HashFunctions, 1026 typename Traits, 1027 typename KeyTraits, 1028 typename Allocator> 1029 inline HashTable<Key, 1030 Value, 1031 Extractor, 1032 HashFunctions, 1033 Traits, 1034 KeyTraits, 1035 Allocator>::HashTable() 1036 : table_(nullptr), 1037 table_size_(0), 1038 key_count_(0), 1039 deleted_count_(0), 1040 queue_flag_(false) 1041 #if DCHECK_IS_ON() 1042 , 1043 access_forbidden_(false), 1044 modifications_(0) 1045 #endif 1046 #if DUMP_HASHTABLE_STATS_PER_TABLE 1047 , 1048 stats_(nullptr) 1049 #endif 1050 { 1051 static_assert(Allocator::kIsGarbageCollected || 1052 (!IsPointerToGarbageCollectedType<Key>::value && 1053 !IsPointerToGarbageCollectedType<Value>::value), 1054 
"Cannot put raw pointers to garbage-collected classes into an " 1055 "off-heap collection."); 1056 } 1057 1058 inline unsigned DoubleHash(unsigned key) { 1059 key = ~key + (key >> 23); 1060 key ^= (key << 12); 1061 key ^= (key >> 7); 1062 key ^= (key << 2); 1063 key ^= (key >> 20); 1064 return key; 1065 } 1066 1067 inline unsigned CalculateCapacity(unsigned size) { 1068 for (unsigned mask = size; mask; mask >>= 1) 1069 size |= mask; // 00110101010 -> 00111111111 1070 return (size + 1) * 2; // 00111111111 -> 10000000000 1071 } 1072 1073 template <typename Key, 1074 typename Value, 1075 typename Extractor, 1076 typename HashFunctions, 1077 typename Traits, 1078 typename KeyTraits, 1079 typename Allocator> 1080 void HashTable<Key, 1081 Value, 1082 Extractor, 1083 HashFunctions, 1084 Traits, 1085 KeyTraits, 1086 Allocator>::ReserveCapacityForSize(unsigned new_size) { 1087 unsigned new_capacity = CalculateCapacity(new_size); 1088 if (new_capacity < KeyTraits::kMinimumTableSize) 1089 new_capacity = KeyTraits::kMinimumTableSize; 1090 1091 if (new_capacity > Capacity()) { 1092 CHECK(!static_cast<int>( 1093 new_capacity >> 1094 31)); // HashTable capacity should not overflow 32bit int. 1095 Rehash(new_capacity, nullptr); 1096 } 1097 } 1098 1099 template <typename Key, 1100 typename Value, 1101 typename Extractor, 1102 typename HashFunctions, 1103 typename Traits, 1104 typename KeyTraits, 1105 typename Allocator> 1106 template <typename HashTranslator, typename T> 1107 inline Value* 1108 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: 1109 Lookup(const T& key) { 1110 // Call the const version of Lookup<HashTranslator, T>(). 
1111 return const_cast<Value*>( 1112 const_cast<const HashTable*>(this)->Lookup<HashTranslator>(key)); 1113 } 1114 1115 template <typename Key, 1116 typename Value, 1117 typename Extractor, 1118 typename HashFunctions, 1119 typename Traits, 1120 typename KeyTraits, 1121 typename Allocator> 1122 template <typename HashTranslator, typename T> 1123 inline const Value* 1124 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: 1125 Lookup(const T& key) const { 1126 DCHECK(!AccessForbidden()); 1127 DCHECK((HashTableKeyChecker< 1128 HashTranslator, KeyTraits, 1129 HashFunctions::safe_to_compare_to_empty_or_deleted>::CheckKey(key))); 1130 const ValueType* table = table_; 1131 if (!table) 1132 return nullptr; 1133 1134 size_t k = 0; 1135 size_t size_mask = TableSizeMask(); 1136 unsigned h = HashTranslator::GetHash(key); 1137 size_t i = h & size_mask; 1138 1139 UPDATE_ACCESS_COUNTS(); 1140 1141 while (1) { 1142 const ValueType* entry = table + i; 1143 1144 if (HashFunctions::safe_to_compare_to_empty_or_deleted) { 1145 if (HashTranslator::Equal(Extractor::Extract(*entry), key)) 1146 return entry; 1147 1148 if (IsEmptyBucket(*entry)) 1149 return nullptr; 1150 } else { 1151 if (IsEmptyBucket(*entry)) 1152 return nullptr; 1153 1154 if (!IsDeletedBucket(*entry) && 1155 HashTranslator::Equal(Extractor::Extract(*entry), key)) 1156 return entry; 1157 } 1158 UPDATE_PROBE_COUNTS(); 1159 if (!k) 1160 k = 1 | DoubleHash(h); 1161 i = (i + k) & size_mask; 1162 } 1163 } 1164 1165 template <typename Key, 1166 typename Value, 1167 typename Extractor, 1168 typename HashFunctions, 1169 typename Traits, 1170 typename KeyTraits, 1171 typename Allocator> 1172 template <typename HashTranslator, typename T> 1173 inline typename HashTable<Key, 1174 Value, 1175 Extractor, 1176 HashFunctions, 1177 Traits, 1178 KeyTraits, 1179 Allocator>::LookupType 1180 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: 1181 LookupForWriting(const T& key) { 1182 
  DCHECK(!AccessForbidden());
  DCHECK(table_);
  RegisterModification();

  ValueType* table = table_;
  size_t k = 0;  // Probe stride; computed lazily on the first collision.
  size_t size_mask = TableSizeMask();
  unsigned h = HashTranslator::GetHash(key);
  size_t i = h & size_mask;

  UPDATE_ACCESS_COUNTS();

  // First tombstone on the probe path; reused as the insertion slot if the
  // key turns out to be absent.
  ValueType* deleted_entry = nullptr;

  while (1) {
    ValueType* entry = table + i;

    if (IsEmptyBucket(*entry))
      return LookupType(deleted_entry ? deleted_entry : entry, false);

    if (HashFunctions::safe_to_compare_to_empty_or_deleted) {
      if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return LookupType(entry, true);

      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
    } else {
      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
      else if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return LookupType(entry, true);
    }
    UPDATE_PROBE_COUNTS();
    if (!k)
      k = 1 | DoubleHash(h);
    i = (i + k) & size_mask;
  }
}

// Same probe as LookupForWriting but additionally returns the computed hash
// code, so callers (InsertPassingHashCode) can pass it to the translator.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key,
                          Value,
                          Extractor,
                          HashFunctions,
                          Traits,
                          KeyTraits,
                          Allocator>::FullLookupType
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    FullLookupForWriting(const T& key) {
  DCHECK(!AccessForbidden());
  DCHECK(table_);
  RegisterModification();

  ValueType* table = table_;
  size_t k = 0;
  size_t size_mask = TableSizeMask();
  unsigned h = HashTranslator::GetHash(key);
  size_t i = h & size_mask;

  UPDATE_ACCESS_COUNTS();

  ValueType* deleted_entry = nullptr;

  while (1) {
    ValueType* entry = table + i;

    if (IsEmptyBucket(*entry))
      return MakeLookupResult(deleted_entry ? deleted_entry : entry, false, h);

    if (HashFunctions::safe_to_compare_to_empty_or_deleted) {
      if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return MakeLookupResult(entry, true, h);

      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
    } else {
      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
      else if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return MakeLookupResult(entry, true, h);
    }
    UPDATE_PROBE_COUNTS();
    if (!k)
      k = 1 | DoubleHash(h);
    i = (i + k) & size_mask;
  }
}

// Initializes a bucket to the empty value. Specialized on whether the empty
// value is all-zero bits, so the common case can be a plain memset.
template <bool emptyValueIsZero>
struct HashTableBucketInitializer;

// Empty value is not all-zero: construct it via the traits.
template <>
struct HashTableBucketInitializer<false> {
  STATIC_ONLY(HashTableBucketInitializer);
  template <typename Traits, typename Allocator, typename Value>
  static void Initialize(Value& bucket) {
    ConstructTraits<Value, Traits, Allocator>::ConstructAndNotifyElement(
        &bucket, Traits::EmptyValue());
  }
};

// Empty value is all-zero bits: zero the memory directly.
template <>
struct HashTableBucketInitializer<true> {
  STATIC_ONLY(HashTableBucketInitializer);
  template <typename Traits, typename Allocator, typename Value>
  static void Initialize(Value& bucket) {
    // This initializes the bucket without copying the empty value. That
    // makes it possible to use this with types that don't support copying.
    // The memset to 0 looks like a slow operation but is optimized by the
    // compilers.
    memset(&bucket, 0, sizeof(bucket));
  }
};

// Resets |bucket| to the empty value, dispatching on kEmptyValueIsZero.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
inline void
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    InitializeBucket(ValueType& bucket) {
  HashTableBucketInitializer<Traits::kEmptyValueIsZero>::template Initialize<
      Traits, Allocator>(bucket);
}

// Inserts |key| (with |extra| forwarded to the translator) unless an equal
// key is already present. Returns an AddResult whose bool indicates whether a
// new entry was created. May expand or (for weak tables) shrink the backing.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T, typename Extra>
typename HashTable<Key,
                   Value,
                   Extractor,
                   HashFunctions,
                   Traits,
                   KeyTraits,
                   Allocator>::AddResult
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    insert(T&& key, Extra&& extra) {
  DCHECK(!AccessForbidden());
  DCHECK(Allocator::IsAllocationAllowed());
  // Lazily allocate the backing on first insertion.
  if (!table_)
    Expand();

  DCHECK(table_);

  ValueType* table = table_;
  size_t k = 0;  // Probe stride; computed lazily on the first collision.
  size_t size_mask = TableSizeMask();
  unsigned h = HashTranslator::GetHash(key);
  size_t i = h & size_mask;

  UPDATE_ACCESS_COUNTS();

  // Probe inline (same scheme as LookupForWriting) so the found bucket stays
  // available for the write below.
  ValueType* deleted_entry = nullptr;
  ValueType* entry;
  while (1) {
    entry = table + i;

    if (IsEmptyBucket(*entry))
      break;

    if (HashFunctions::safe_to_compare_to_empty_or_deleted) {
      if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return AddResult(this, entry, false);

      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
    } else {
      if (IsDeletedBucket(*entry))
        deleted_entry = entry;
      else if (HashTranslator::Equal(Extractor::Extract(*entry), key))
        return AddResult(this, entry, false);
    }
    UPDATE_PROBE_COUNTS();
    if (!k)
      k = 1 | DoubleHash(h);
    i = (i + k) & size_mask;
  }

  RegisterModification();

  if (deleted_entry) {
    // Overwrite any data left over from last use, using placement new or
    // memset.
    InitializeBucket(*deleted_entry);
    entry = deleted_entry;
    --deleted_count_;
  }

  HashTranslator::Translate(*entry, std::forward<T>(key),
                            std::forward<Extra>(extra));
  DCHECK(!IsEmptyOrDeletedBucket(*entry));
  // Translate constructs an element so we need to notify using the trait. Avoid
  // doing that in the translator so that they can be easily customized.
  ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);

  ++key_count_;

  if (ShouldExpand()) {
    // Rehashing moves the new element; track its new location for the result.
    entry = Expand(entry);
  } else if (WTF::IsWeak<ValueType>::value && ShouldShrink()) {
    // When weak hash tables are processed by the garbage collector,
    // elements with no other strong references to them will have their
    // table entries cleared. But no shrinking of the backing store is
    // allowed at that time, as allocations are prohibited during that
    // GC phase.
    //
    // With that weak processing taking care of removals, explicit
    // erase()s of elements is rarely done. Which implies that the
    // weak hash table will never be checked if it can be shrunk.
    //
    // To prevent weak hash tables with very low load factors from
    // developing, we perform it when adding elements instead.
    entry = Rehash(table_size_ / 2, entry);
  }

  return AddResult(this, entry, true);
}

// Like insert(), but the translator also receives the hash code (obtained via
// FullLookupForWriting) so it does not need to recompute it.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T, typename Extra>
typename HashTable<Key,
                   Value,
                   Extractor,
                   HashFunctions,
                   Traits,
                   KeyTraits,
                   Allocator>::AddResult
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    InsertPassingHashCode(T&& key, Extra&& extra) {
  DCHECK(!AccessForbidden());
  DCHECK(Allocator::IsAllocationAllowed());
  if (!table_)
    Expand();

  FullLookupType lookup_result = FullLookupForWriting<HashTranslator>(key);

  ValueType* entry = lookup_result.first.first;
  bool found = lookup_result.first.second;
  unsigned h = lookup_result.second;

  if (found)
    return AddResult(this, entry, false);

  RegisterModification();

  // Reusing a tombstone: clear the leftover contents first.
  if (IsDeletedBucket(*entry)) {
    InitializeBucket(*entry);
    --deleted_count_;
  }

  HashTranslator::Translate(*entry, std::forward<T>(key),
                            std::forward<Extra>(extra), h);
  DCHECK(!IsEmptyOrDeletedBucket(*entry));
  // Translate constructs an element so we need to notify using the trait. Avoid
  // doing that in the translator so that they can be easily customized.
  ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);

  ++key_count_;
  if (ShouldExpand())
    entry = Expand(entry);

  return AddResult(this, entry, true);
}

// Moves an existing element into this table during rehashing. The element is
// expected not to be present yet (see the DCHECKs); returns its new bucket.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Reinsert(ValueType&& entry) {
  DCHECK(table_);
  RegisterModification();
  DCHECK(!LookupForWriting(Extractor::Extract(entry)).second);
  DCHECK(
      !IsDeletedBucket(*(LookupForWriting(Extractor::Extract(entry)).first)));
#if DUMP_HASHTABLE_STATS
  HashTableStats::instance().numReinserts.fetch_add(1,
                                                    std::memory_order_relaxed);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
  stats_->numReinserts.fetch_add(1, std::memory_order_relaxed);
#endif
  Value* new_entry = LookupForWriting(Extractor::Extract(entry)).first;
  // Mover optionally forbids GC across the move, per the traits.
  Mover<ValueType, Allocator, Traits,
        Traits::template NeedsToForbidGCOnMove<>::value>::Move(std::move(entry),
                                                               *new_entry);

  return new_entry;
}

// Returns an iterator to the element matching |key|, or end() if absent.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key,
                          Value,
                          Extractor,
                          HashFunctions,
                          Traits,
                          KeyTraits,
                          Allocator>::iterator
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Find(const T& key) {
  ValueType* entry = Lookup<HashTranslator>(key);
  if (!entry)
    return end();

  return MakeKnownGoodIterator(entry);
}

// Const overload of Find().
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename
              KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key,
                          Value,
                          Extractor,
                          HashFunctions,
                          Traits,
                          KeyTraits,
                          Allocator>::const_iterator
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Find(const T& key) const {
  const ValueType* entry = Lookup<HashTranslator>(key);
  if (!entry)
    return end();

  return MakeKnownGoodConstIterator(entry);
}

// Returns true if an element matching |key| is present.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename HashTranslator, typename T>
bool HashTable<Key,
               Value,
               Extractor,
               HashFunctions,
               Traits,
               KeyTraits,
               Allocator>::Contains(const T& key) const {
  return Lookup<HashTranslator>(key);
}

// Removes the element in bucket |pos|, leaving a tombstone (deleted bucket)
// so probe chains through this slot stay intact. May shrink the table.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
void HashTable<Key,
               Value,
               Extractor,
               HashFunctions,
               Traits,
               KeyTraits,
               Allocator>::erase(const ValueType* pos) {
  RegisterModification();
#if DUMP_HASHTABLE_STATS
  HashTableStats::instance().numRemoves.fetch_add(1, std::memory_order_relaxed);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
  stats_->numRemoves.fetch_add(1, std::memory_order_relaxed);
#endif

  // Forbid table access while the bucket's destructor runs.
  EnterAccessForbiddenScope();
  DeleteBucket(*pos);
  LeaveAccessForbiddenScope();
  ++deleted_count_;
  --key_count_;

  if (ShouldShrink())
    Shrink();
}

// erase() by iterator; no-op for end().
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
inline void
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    erase(iterator it) {
  if (it == end())
    return;
  erase(it.iterator_.position_);
}

// erase() by const_iterator; no-op for end().
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
inline void
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    erase(const_iterator it) {
  if (it == end())
    return;
  erase(it.position_);
}

// erase() by key: looks the key up and removes it if present.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
inline void
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    erase(KeyPeekInType key) {
  erase(find(key));
}

// Allocates a backing store of |size| buckets, each initialized to the empty
// value (zeroed allocation when the empty value is all-zero bits).
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    AllocateTable(unsigned size) {
  // CheckMul guards against size * sizeof(ValueType) overflowing.
  size_t alloc_size = base::CheckMul(size, sizeof(ValueType)).ValueOrDie();
  ValueType* result;
  // Assert that we will not use memset on things with a vtable entry. The
  // compiler will also check this on some platforms. We would like to check
  // this on the whole value (key-value pair), but std::is_polymorphic will
  // return false for a pair of two types, even if one of the components is
  // polymorphic.
  static_assert(
      !Traits::kEmptyValueIsZero || !std::is_polymorphic<KeyType>::value,
      "empty value cannot be zero for things with a vtable");
  static_assert(
      Allocator::kIsGarbageCollected ||
          ((!IsDisallowNew<KeyType>::value || !IsTraceable<KeyType>::value) &&
           (!IsDisallowNew<ValueType>::value ||
            !IsTraceable<ValueType>::value)),
      "Cannot put DISALLOW_NEW objects that "
      "have trace methods into an off-heap HashTable");

  if (Traits::kEmptyValueIsZero) {
    result = Allocator::template AllocateZeroedHashTableBacking<ValueType,
                                                                HashTable>(
        alloc_size);
  } else {
    result = Allocator::template AllocateHashTableBacking<ValueType, HashTable>(
        alloc_size);
    for (unsigned i = 0; i < size; i++)
      InitializeBucket(result[i]);
  }
  return result;
}

// Destroys the buckets of |table| as needed, then frees the backing store.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
void HashTable<Key,
               Value,
               Extractor,
               HashFunctions,
               Traits,
               KeyTraits,
               Allocator>::DeleteAllBucketsAndDeallocate(ValueType* table,
                                                         unsigned size) {
  // We delete a bucket in the following cases:
  // - It is not trivially destructible.
  // - The table is weak (thus garbage collected) and we are currently marking.
  //   This is to handle the case where a backing store is removed from the
  //   HashTable after HashTable has been enqueued for processing. If we remove
  //   the backing in that case it stays unprocessed which upsets the marking
  //   verifier that checks that all backings are in consistent state.
  const bool needs_bucket_deletion =
      !std::is_trivially_destructible<ValueType>::value ||
      (WTF::IsWeak<ValueType>::value && Allocator::IsIncrementalMarking());
  if (needs_bucket_deletion) {
    for (unsigned i = 0; i < size; ++i) {
      // This code is called when the hash table is cleared or resized. We
      // have allocated a new backing store and we need to run the
      // destructors on the old backing store, as it is being freed. If we
      // are GCing we need to both call the destructor and mark the bucket
      // as deleted, otherwise the destructor gets called again when the
      // GC finds the backing store. With the default allocator it's
      // enough to call the destructor, since we will free the memory
      // explicitly and we won't see the memory with the bucket again.
      if (Allocator::kIsGarbageCollected) {
        if (!IsEmptyOrDeletedBucket(table[i]))
          DeleteBucket(table[i]);
      } else {
        if (!IsDeletedBucket(table[i]))
          table[i].~ValueType();
      }
    }
  }
  Allocator::FreeHashTableBacking(table);
}

// Picks the next table size (minimum size on first allocation, same size when
// rehashing in place to purge tombstones, otherwise doubled) and rehashes.
// |entry|, if non-null, is tracked through the rehash and its new bucket
// returned.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Expand(Value* entry) {
  unsigned new_size;
  if (!table_size_) {
    new_size = KeyTraits::kMinimumTableSize;
  } else if (MustRehashInPlace()) {
    new_size = table_size_;
  } else {
    new_size = table_size_ * 2;
    CHECK_GT(new_size, table_size_);  // Guard against unsigned overflow.
  }

  return Rehash(new_size, entry);
}

// Attempts to grow by expanding the existing backing allocation in place
// (only possible with a GC'd backing). On success the live elements are moved
// out to a temporary table and rehashed back into the enlarged buffer.
// |success| reports whether in-place expansion happened.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    ExpandBuffer(unsigned new_table_size, Value* entry, bool& success) {
  success = false;
  DCHECK_LT(table_size_, new_table_size);
  CHECK(Allocator::IsAllocationAllowed());
  if (!Allocator::ExpandHashTableBacking(table_,
                                         new_table_size * sizeof(ValueType)))
    return nullptr;

  success = true;

  Value* new_entry = nullptr;
  unsigned old_table_size = table_size_;
  ValueType* original_table = table_;

  // Move live elements into a temporary table so the (expanded) original
  // buffer can be reinitialized and refilled via RehashTo below.
  ValueType* temporary_table = AllocateTable(old_table_size);
  for (unsigned i = 0; i < old_table_size; i++) {
    if (&table_[i] == entry)
      new_entry = &temporary_table[i];
    if (IsEmptyOrDeletedBucket(table_[i])) {
      DCHECK_NE(&table_[i], entry);
      if (Traits::kEmptyValueIsZero) {
        memset(&temporary_table[i], 0, sizeof(ValueType));
      } else {
        InitializeBucket(temporary_table[i]);
      }
    } else {
      Mover<ValueType, Allocator, Traits,
            Traits::template NeedsToForbidGCOnMove<>::value>::
          Move(std::move(table_[i]), temporary_table[i]);
      table_[i].~ValueType();
    }
  }
  table_ = temporary_table;
  Allocator::template BackingWriteBarrierForHashTable<HashTable>(&table_);

  // Reset every bucket of the enlarged original buffer to empty before
  // rehashing the elements back into it.
  if (Traits::kEmptyValueIsZero) {
    memset(original_table, 0, new_table_size * sizeof(ValueType));
  } else {
    for (unsigned i = 0; i < new_table_size; i++)
      InitializeBucket(original_table[i]);
  }
  new_entry = RehashTo(original_table, new_table_size, new_entry);

  return new_entry;
}

// Reinserts all live elements into |new_table| and swaps it in as the
// backing, then disposes of the old backing. Returns the new location of
// |entry| (or nullptr if |entry| was null).
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    RehashTo(ValueType* new_table, unsigned new_table_size, Value* entry) {
#if DUMP_HASHTABLE_STATS
  if (table_size_ != 0) {
    HashTableStats::instance().numRehashes.fetch_add(1,
                                                     std::memory_order_relaxed);
  }
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
  if (table_size_ != 0)
    stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
#endif

  // Wrap the new buffer in a temporary HashTable so Reinsert can probe it.
  HashTable new_hash_table(RawStorageTag{}, new_table, new_table_size);

  Value* new_entry = nullptr;
  for (unsigned i = 0; i != table_size_; ++i) {
    if (IsEmptyOrDeletedBucket(table_[i])) {
      DCHECK_NE(&table_[i], entry);
      continue;
    }
    Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i]));
    if (&table_[i] == entry) {
      DCHECK(!new_entry);
      new_entry = reinserted_entry;
    }
  }

  Allocator::TraceBackingStoreIfMarked(new_hash_table.table_);

  ValueType* old_table = table_;
  unsigned old_table_size = table_size_;

  // This swaps the newly allocated buffer with the current one. The store to
  // the current table has to be atomic to prevent races with concurrent marker.
  AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed);
  Allocator::template BackingWriteBarrierForHashTable<HashTable>(&table_);
  table_size_ = new_table_size;

  new_hash_table.table_ = old_table;
  new_hash_table.table_size_ = old_table_size;

  // Explicitly clear since garbage collected HashTables don't do this on
  // destruction.
  new_hash_table.clear();

  // All tombstones were dropped during reinsertion.
  deleted_count_ = 0;

#if DUMP_HASHTABLE_STATS_PER_TABLE
  if (!stats_)
    stats_ = HashTableStatsPtr<Allocator>::Create();
#endif

  return new_entry;
}

// Rehashes into a table of |new_table_size| buckets, preferring in-place
// buffer expansion (GC'd backings only) before allocating a fresh table.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
Value*
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Rehash(unsigned new_table_size, Value* entry) {
  unsigned old_table_size = table_size_;

#if DUMP_HASHTABLE_STATS
  if (old_table_size != 0) {
    HashTableStats::instance().numRehashes.fetch_add(1,
                                                     std::memory_order_relaxed);
  }
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
  if (old_table_size != 0)
    stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
#endif

  // The Allocator::kIsGarbageCollected check is not needed. The check is just
  // a static hint for a compiler to indicate that Base::expandBuffer returns
  // false if Allocator is a PartitionAllocator.
  if (Allocator::kIsGarbageCollected && new_table_size > old_table_size) {
    bool success;
    Value* new_entry = ExpandBuffer(new_table_size, entry, success);
    if (success)
      return new_entry;
  }

  // In-place expansion failed (or does not apply): allocate a fresh backing
  // and rehash into it.
  ValueType* new_table = AllocateTable(new_table_size);
  Value* new_entry = RehashTo(new_table, new_table_size, entry);

  return new_entry;
}

// Destroys all buckets and releases the backing store, returning the table
// to its default-constructed (empty, no-backing) state.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
void HashTable<Key,
               Value,
               Extractor,
               HashFunctions,
               Traits,
               KeyTraits,
               Allocator>::clear() {
  RegisterModification();
  if (!table_)
    return;

  EnterAccessForbiddenScope();
  DeleteAllBucketsAndDeallocate(table_, table_size_);
  LeaveAccessForbiddenScope();
  // Atomic store: the concurrent marker may read table_ (see Trace).
  AsAtomicPtr(&table_)->store(nullptr, std::memory_order_relaxed);
  table_size_ = 0;
  key_count_ = 0;
}

// Copy constructor: reserves capacity up front, then re-inserts every element
// from |other|.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    HashTable(const HashTable& other)
    : table_(nullptr),
      table_size_(0),
      key_count_(0),
      deleted_count_(0),
      queue_flag_(false)
#if DCHECK_IS_ON()
      ,
      access_forbidden_(false),
      modifications_(0)
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
      ,
      stats_(HashTableStatsPtr<Allocator>::copy(other.stats_))
#endif
{
  if (other.size())
    ReserveCapacityForSize(other.size());
  // Copy the hash table the dumb way, by adding each element to the new
  // table. It might be more efficient to copy the table slots, but it's not
  // clear that efficiency is needed.
  for (const auto& element : other)
    insert(element);
}

// Move constructor: constructs an empty table and swaps with |other|.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    HashTable(HashTable&& other)
    : table_(nullptr),
      table_size_(0),
      key_count_(0),
      deleted_count_(0),
      queue_flag_(false)
#if DCHECK_IS_ON()
      ,
      access_forbidden_(false),
      modifications_(0)
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
      ,
      stats_(HashTableStatsPtr<Allocator>::copy(other.stats_))
#endif
{
  swap(other);
}

// Swaps the entire table state with |other|, using atomic stores and write
// barriers on the backing pointers for the benefit of the concurrent marker.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
void HashTable<Key,
               Value,
               Extractor,
               HashFunctions,
               Traits,
               KeyTraits,
               Allocator>::swap(HashTable& other) {
  DCHECK(!AccessForbidden());
  // Following 3 lines swap table_ and other.table_ using atomic stores. These
  // are needed for Oilpan concurrent marking which might trace the hash table
  // while it is being swapped (i.e. the atomic stores are to avoid a data
  // race). Atomic reads are not needed here because this method is only called
  // on the mutator thread, which is also the only one that writes to them, so
  // there is *no* risk of data races when reading.
  AtomicWriteSwap(table_, other.table_);
  Allocator::template BackingWriteBarrierForHashTable<HashTable>(&table_);
  Allocator::template BackingWriteBarrierForHashTable<HashTable>(&other.table_);
  if (IsWeak<ValueType>::value) {
    // Weak processing is omitted when no backing store is present. In case such
    // an empty table is later on used it needs to be strongified.
    if (table_)
      Allocator::TraceBackingStoreIfMarked(table_);
    if (other.table_)
      Allocator::TraceBackingStoreIfMarked(other.table_);
  }
  std::swap(table_size_, other.table_size_);
  std::swap(key_count_, other.key_count_);
  // std::swap does not work for bit fields.
  unsigned deleted = deleted_count_;
  deleted_count_ = other.deleted_count_;
  other.deleted_count_ = deleted;
  DCHECK(!queue_flag_);
  DCHECK(!other.queue_flag_);

#if DCHECK_IS_ON()
  std::swap(modifications_, other.modifications_);
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
  HashTableStatsPtr<Allocator>::swap(stats_, other.stats_);
#endif
}

// Copy assignment via copy-and-swap.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>&
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    operator=(const HashTable& other) {
  HashTable tmp(other);
  swap(tmp);
  return *this;
}

// Move assignment via swap.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>&
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    operator=(HashTable&& other) {
  swap(other);
  return *this;
}

// Weak-processing hook for the garbage collector. The primary template (used
// for tables with no weak handling) is a no-op; the kWeakHandling
// specialization below clears dead weak entries.
template <WeakHandlingFlag weakHandlingFlag,
          typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
struct WeakProcessingHashTableHelper {
  STATIC_ONLY(WeakProcessingHashTableHelper);
  static void Process(const typename Allocator::WeakCallbackInfo&,
                      const void*) {}
};

// Specialization for tables that need weak handling: removes entries whose
// weak references died during garbage collection.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
struct WeakProcessingHashTableHelper<kWeakHandling,
                                     Key,
                                     Value,
                                     Extractor,
                                     HashFunctions,
                                     Traits,
                                     KeyTraits,
                                     Allocator> {
  STATIC_ONLY(WeakProcessingHashTableHelper);

  using HashTableType = HashTable<Key,
                                  Value,
                                  Extractor,
                                  HashFunctions,
                                  Traits,
                                  KeyTraits,
                                  Allocator>;
  using ValueType = typename HashTableType::ValueType;

  // Used for purely weak and for weak-and-strong tables (ephemerons).
  // |parameter| is the HashTable registered in Trace() below.
  static void Process(const typename Allocator::WeakCallbackInfo&,
                      const void* parameter) {
    HashTableType* table =
        reinterpret_cast<HashTableType*>(const_cast<void*>(parameter));
    // During incremental marking, the table may be freed after the callback has
    // been registered.
    if (!table->table_)
      return;

    // Weak processing: If the backing was accessible through an iterator and
    // thus marked strongly this loop will find all buckets as non-empty.
    for (ValueType* element = table->table_ + table->table_size_ - 1;
         element >= table->table_; element--) {
      if (!HashTableType::IsEmptyOrDeletedBucket(*element)) {
        if (!TraceInCollectionTrait<kWeakHandling, ValueType, Traits>::IsAlive(
                *element)) {
          table->RegisterModification();
          HashTableType::DeleteBucket(*element);  // Also calls the destructor.
          table->deleted_count_++;
          table->key_count_--;
          // We don't rehash the backing until the next add or delete,
          // because that would cause allocation during GC.
        }
      }
    }
  }
};

// Traces the backing store for Oilpan garbage collection. Strong tables are
// traced directly; weak tables only trace the backing and register the
// weak-processing callback above to run after marking.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
template <typename VisitorDispatcher, typename A>
std::enable_if_t<A::kIsGarbageCollected>
HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
    Trace(VisitorDispatcher visitor) const {
  // bail out for concurrent marking
  if (visitor->ConcurrentTracingBailOut(
          {this, [](blink::Visitor* visitor, const void* object) {
             reinterpret_cast<
                 const HashTable<Key, Value, Extractor, HashFunctions, Traits,
                                 KeyTraits, Allocator>*>(object)
                 ->Trace(visitor);
           }}))
    return;

  static_assert(WTF::IsWeak<ValueType>::value ||
                    IsTraceableInCollectionTrait<Traits>::value,
                "Value should not be traced");
  // Atomic load pairs with the atomic stores in RehashTo/clear/swap.
  const ValueType* table =
      AsAtomicPtr(&table_)->load(std::memory_order_relaxed);
  if (!WTF::IsWeak<ValueType>::value) {
    // Strong HashTable.
    Allocator::template TraceHashTableBackingStrongly<ValueType, HashTable>(
        visitor, table, &table_);
  } else {
    // Weak HashTable. The HashTable may be held alive strongly from somewhere
    // else, e.g., an iterator.

    // Only trace the backing store. Its buckets will be processed after
    // marking. The interesting cases for marking are:
    // - The backing is dropped using clear(): The backing can still be
    //   compacted but empty/deleted buckets will only be destroyed once the
    //   backing is reclaimed by the garbage collector on the next cycle.
    // - The hash table expands/shrinks: Buckets are moved to the new backing
    //   store and strongified, resulting in all buckets being alive. The old
    //   backing store is marked but only contains empty/deleted buckets as all
    //   non-empty/deleted buckets have been moved to the new backing store.
    Allocator::template TraceHashTableBackingOnly<ValueType, HashTable>(
        visitor, table, &table_);
    // Trace the table weakly. For marking this will result in delaying the
    // processing until the end of the atomic pause. It is safe to trace
    // weakly multiple times.
    Allocator::template TraceHashTableBackingWeakly<ValueType, HashTable>(
        visitor, table, &table_,
        WeakProcessingHashTableHelper<WeakHandlingTrait<ValueType>::value, Key,
                                      Value, Extractor, HashFunctions, Traits,
                                      KeyTraits, Allocator>::Process,
        this);
  }
}

// iterator adapters

// Adapts HashTable::const_iterator to the value type the containing
// collection's Traits expose (e.g. mapping raw storage to the public type).
template <typename HashTableType, typename Traits>
struct HashTableConstIteratorAdapter {
  STACK_ALLOCATED();

 public:
  HashTableConstIteratorAdapter() = default;
  HashTableConstIteratorAdapter(
      const typename HashTableType::const_iterator& impl)
      : impl_(impl) {}
  typedef typename Traits::IteratorConstGetType GetType;
  typedef
      typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;

  GetType Get() const {
    return const_cast<GetType>(SourceGetType(impl_.Get()));
  }
  typename Traits::IteratorConstReferenceType operator*() const {
    return Traits::GetToReferenceConstConversion(Get());
  }
  GetType operator->() const { return Get(); }

  HashTableConstIteratorAdapter& operator++() {
    ++impl_;
    return *this;
  }
  // postfix ++ intentionally omitted
  HashTableConstIteratorAdapter& operator--() {
    --impl_;
    return *this;
  }
  // postfix -- intentionally omitted
  typename HashTableType::const_iterator impl_;
};

// Debug-printing support: forwards to the underlying iterator's operator<<.
template <typename HashTable, typename Traits>
std::ostream& operator<<(
    std::ostream& stream,
    const HashTableConstIteratorAdapter<HashTable, Traits>& iterator) {
  return stream << iterator.impl_;
}

// Mutable counterpart of HashTableConstIteratorAdapter; also converts
// implicitly to the const adapter.
template <typename HashTableType, typename Traits>
struct HashTableIteratorAdapter {
  STACK_ALLOCATED();
  typedef typename Traits::IteratorGetType GetType;
  typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;

  HashTableIteratorAdapter() = default;
  HashTableIteratorAdapter(const typename HashTableType::iterator& impl)
      : impl_(impl) {}

  GetType Get() const {
    return const_cast<GetType>(SourceGetType(impl_.get()));
  }
  typename Traits::IteratorReferenceType operator*() const {
    return Traits::GetToReferenceConversion(Get());
  }
  GetType operator->() const { return Get(); }

  HashTableIteratorAdapter& operator++() {
    ++impl_;
    return *this;
  }
  // postfix ++ intentionally omitted

  HashTableIteratorAdapter& operator--() {
    --impl_;
    return *this;
  }
  // postfix -- intentionally omitted

  operator HashTableConstIteratorAdapter<HashTableType, Traits>() {
    typename HashTableType::const_iterator i = impl_;
    return i;
  }

  typename HashTableType::iterator impl_;
};

// Debug-printing support for the mutable adapter.
template <typename HashTable, typename Traits>
std::ostream& operator<<(
    std::ostream& stream,
    const HashTableIteratorAdapter<HashTable, Traits>& iterator) {
  return stream << iterator.impl_;
}

// Equality for adapters compares the wrapped iterators.
template <typename T, typename U>
inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a,
                       const HashTableConstIteratorAdapter<T, U>& b) {
  return a.impl_ == b.impl_;
}

template <typename T, typename U>
inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a,
                       const HashTableConstIteratorAdapter<T, U>& b) {
  return a.impl_ != b.impl_;
}

template <typename T, typename U>
inline bool operator==(const HashTableIteratorAdapter<T, U>& a,
                       const HashTableIteratorAdapter<T, U>& b) {
  return a.impl_ == b.impl_;
}

template <typename T, typename U>
inline bool
operator!=(const HashTableIteratorAdapter<T, U>& a, 2308 const HashTableIteratorAdapter<T, U>& b) { 2309 return a.impl_ != b.impl_; 2310 } 2311 2312 // All 4 combinations of ==, != and Const,non const. 2313 template <typename T, typename U> 2314 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, 2315 const HashTableIteratorAdapter<T, U>& b) { 2316 return a.impl_ == b.impl_; 2317 } 2318 2319 template <typename T, typename U> 2320 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, 2321 const HashTableIteratorAdapter<T, U>& b) { 2322 return a.impl_ != b.impl_; 2323 } 2324 2325 template <typename T, typename U> 2326 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, 2327 const HashTableConstIteratorAdapter<T, U>& b) { 2328 return a.impl_ == b.impl_; 2329 } 2330 2331 template <typename T, typename U> 2332 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, 2333 const HashTableConstIteratorAdapter<T, U>& b) { 2334 return a.impl_ != b.impl_; 2335 } 2336 2337 template <typename Collection1, typename Collection2> 2338 inline void RemoveAll(Collection1& collection, 2339 const Collection2& to_be_removed) { 2340 if (collection.IsEmpty() || to_be_removed.IsEmpty()) 2341 return; 2342 typedef typename Collection2::const_iterator CollectionIterator; 2343 CollectionIterator end(to_be_removed.end()); 2344 for (CollectionIterator it(to_be_removed.begin()); it != end; ++it) 2345 collection.erase(*it); 2346 } 2347 2348 } // namespace WTF 2349 2350 #include "third_party/blink/renderer/platform/wtf/hash_iterators.h" 2351 2352 #endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_TABLE_H_ 2353