1 /*
2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
26 #define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
27 
28 #include "memory/allocation.inline.hpp"
29 #include "runtime/atomic.hpp"
30 #include "runtime/orderAccess.hpp"
31 #include "runtime/prefetch.inline.hpp"
32 #include "utilities/concurrentHashTable.hpp"
33 #include "utilities/globalCounter.inline.hpp"
34 #include "utilities/numberSeq.hpp"
35 #include "utilities/spinYield.hpp"
36 
37 // 2^30 = 1G buckets
38 #define SIZE_BIG_LOG2 30
39 // 2^5  = 32 buckets
40 #define SIZE_SMALL_LOG2 5
41 
42 // Number from spinYield.hpp. In some loops SpinYield would be unfair.
43 #define SPINPAUSES_PER_YIELD 8192
44 
45 #ifdef ASSERT
46 #ifdef _LP64
47 // Two low bits are not usable.
48 static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
49 #else
50 // Two low bits are not usable.
51 static const void* POISON_PTR = (void*)0xffbadbac;
52 #endif
53 #endif
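
// The two low bits of a Bucket's _first pointer double as state bits
// (STATE_LOCK_BIT and STATE_REDIRECT_BIT), which is why POISON_PTR keeps
// them clear. An illustrative sketch of this kind of pointer tagging (not
// the exact helpers, which are declared in concurrentHashTable.hpp):
//
//   uintptr_t tagged = (uintptr_t)node | STATE_LOCK_BIT;  // set a state bit
//   Node* clean      = (Node*)(tagged & ~(uintptr_t)0x3); // strip both state bits
//   bool locked      = (tagged & STATE_LOCK_BIT) != 0;    // test a state bit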
54 
55 // Node
56 template <typename VALUE, typename CONFIG, MEMFLAGS F>
57 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
58 ConcurrentHashTable<VALUE, CONFIG, F>::
59   Node::next() const
60 {
61   return OrderAccess::load_acquire(&_next);
62 }
63 
64 // Bucket
65 template <typename VALUE, typename CONFIG, MEMFLAGS F>
66 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
67 ConcurrentHashTable<VALUE, CONFIG, F>::
68   Bucket::first_raw() const
69 {
70   return OrderAccess::load_acquire(&_first);
71 }
72 
73 template <typename VALUE, typename CONFIG, MEMFLAGS F>
74 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
75   Bucket::release_assign_node_ptr(
76     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
77     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
78 {
79   // Due to this assert this method is not static.
80   assert(is_locked(), "Must be locked.");
81   Node** tmp = (Node**)dst;
82   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
83 }
84 
85 template <typename VALUE, typename CONFIG, MEMFLAGS F>
86 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
87 ConcurrentHashTable<VALUE, CONFIG, F>::
88   Bucket::first() const
89 {
90   // We strip the state bits before returning the ptr.
91   return clear_state(OrderAccess::load_acquire(&_first));
92 }
93 
94 template <typename VALUE, typename CONFIG, MEMFLAGS F>
95 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
96   Bucket::have_redirect() const
97 {
98   return is_state(first_raw(), STATE_REDIRECT_BIT);
99 }
100 
101 template <typename VALUE, typename CONFIG, MEMFLAGS F>
102 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
103   Bucket::is_locked() const
104 {
105   return is_state(first_raw(), STATE_LOCK_BIT);
106 }
107 
108 template <typename VALUE, typename CONFIG, MEMFLAGS F>
109 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
110   Bucket::lock()
111 {
112   int i = 0;
113   // SpinYield would be unfair here
114   while (!this->trylock()) {
115     if ((++i) == SPINPAUSES_PER_YIELD) {
116       // On a contemporary OS, yielding will give the CPU to another runnable
117       // thread if there is no spare CPU available.
118       os::naked_yield();
119       i = 0;
120     } else {
121       SpinPause();
122     }
123   }
124 }
125 
126 template <typename VALUE, typename CONFIG, MEMFLAGS F>
127 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
128   Bucket::release_assign_last_node_next(
129      typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
130 {
131   assert(is_locked(), "Must be locked.");
132   Node* const volatile * ret = first_ptr();
133   while (clear_state(*ret) != NULL) {
134     ret = clear_state(*ret)->next_ptr();
135   }
136   release_assign_node_ptr(ret, node);
137 }
138 
139 template <typename VALUE, typename CONFIG, MEMFLAGS F>
140 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
141   Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
142                     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
143                     )
144 {
145   if (is_locked()) {
146     return false;
147   }
148   if (Atomic::cmpxchg(node, &_first, expect) == expect) {
149     return true;
150   }
151   return false;
152 }
153 
154 template <typename VALUE, typename CONFIG, MEMFLAGS F>
155 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
156   Bucket::trylock()
157 {
158   if (is_locked()) {
159     return false;
160   }
161   // We will expect a clean first pointer.
162   Node* tmp = first();
163   if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
164     return true;
165   }
166   return false;
167 }
168 
169 template <typename VALUE, typename CONFIG, MEMFLAGS F>
170 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
171   Bucket::unlock()
172 {
173   assert(is_locked(), "Must be locked.");
174   assert(!have_redirect(),
175          "Unlocking a bucket after it has reached terminal state.");
176   OrderAccess::release_store(&_first, clear_state(first()));
177 }
178 
179 template <typename VALUE, typename CONFIG, MEMFLAGS F>
180 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
181   Bucket::redirect()
182 {
183   assert(is_locked(), "Must be locked.");
184   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
185 }
186 
187 // InternalTable
188 template <typename VALUE, typename CONFIG, MEMFLAGS F>
189 inline ConcurrentHashTable<VALUE, CONFIG, F>::
190   InternalTable::InternalTable(size_t log2_size)
191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
192       _hash_mask(~(~((size_t)0) << _log2_size))
193 {
194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
195          "Bad size");
196   _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
197   // Use placement new for each element instead of new[] which could use more
198   // memory than allocated.
199   for (size_t i = 0; i < _size; ++i) {
200     new (_buckets + i) Bucket();
201   }
202 }
203 
204 template <typename VALUE, typename CONFIG, MEMFLAGS F>
205 inline ConcurrentHashTable<VALUE, CONFIG, F>::
206   InternalTable::~InternalTable()
207 {
208   FREE_C_HEAP_ARRAY(Bucket, _buckets);
209 }
210 
211 // ScopedCS
212 template <typename VALUE, typename CONFIG, MEMFLAGS F>
213 inline ConcurrentHashTable<VALUE, CONFIG, F>::
214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
215     : _thread(thread), _cht(cht)
216 {
217   GlobalCounter::critical_section_begin(_thread);
218   // This version is published now.
219   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
220     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
221   }
222 }
223 
224 template <typename VALUE, typename CONFIG, MEMFLAGS F>
225 inline ConcurrentHashTable<VALUE, CONFIG, F>::
226   ScopedCS::~ScopedCS()
227 {
228   GlobalCounter::critical_section_end(_thread);
229 }
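
// ScopedCS is used as an RAII guard around read-side accesses, e.g. (as in
// internal_get()/internal_insert() below):
//
//   {
//     ScopedCS cs(thread, this);          // enter GlobalCounter critical section
//     Bucket* bucket = get_bucket(hash);
//     // ... reads are safe against a concurrent resize freeing the table
//     //     or a concurrent remove freeing the nodes ...
//   }                                     // destructor ends the critical section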
230 
231 // BaseConfig
232 template <typename VALUE, typename CONFIG, MEMFLAGS F>
233 inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
234   BaseConfig::allocate_node(size_t size, const VALUE& value)
235 {
236   return AllocateHeap(size, F);
237 }
238 
239 template <typename VALUE, typename CONFIG, MEMFLAGS F>
240 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
241   BaseConfig::free_node(void* memory, const VALUE& value)
242 {
243   FreeHeap(memory);
244 }
245 
246 template <typename VALUE, typename CONFIG, MEMFLAGS F>
247 template <typename LOOKUP_FUNC>
248 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
249   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
250 {
251   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
252 }
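
// MultiGetHandle (declared in concurrentHashTable.hpp) lets a caller perform
// several lookups under a single critical section instead of one per get().
// Illustrative sketch only, assuming a ScopedCS-like (Thread*, table*)
// construction and a hypothetical MyValue table "cht":
//
//   MultiGetHandle mgh(thread, cht);
//   MyValue* a = mgh.get(lookup_a, NULL);  // both gets share one critical section
//   MyValue* b = mgh.get(lookup_b, NULL);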
253 
254 // HaveDeletables
255 template <typename VALUE, typename CONFIG, MEMFLAGS F>
256 template <typename EVALUATE_FUNC>
257 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
258   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
259                                                       EVALUATE_FUNC& eval_f,
260                                                       Bucket* prefetch_bucket)
261 {
262   // Instantiated for pointer type (true), so we can use prefetch.
263   // When visiting all Nodes, doing this prefetch gives around a 30% improvement.
264   Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
265   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
266     if (pref != NULL) {
267       Prefetch::read(*pref->value(), 0);
268       pref = pref->next();
269     }
270     // Read next() Node* once.  May be racing with a thread moving the next
271     // pointers.
272     Node* next_pref = next->next();
273     if (next_pref != NULL) {
274       Prefetch::read(*next_pref->value(), 0);
275     }
276     if (eval_f(next->value())) {
277       return true;
278     }
279   }
280   return false;
281 }
282 
283 template <typename VALUE, typename CONFIG, MEMFLAGS F>
284 template <bool b, typename EVALUATE_FUNC>
285 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
286   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
287                                                    EVALUATE_FUNC& eval_f,
288                                                    Bucket* preb)
289 {
290   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
291     if (eval_f(next->value())) {
292       return true;
293     }
294   }
295   return false;
296 }
297 
298 // ConcurrentHashTable
299 template <typename VALUE, typename CONFIG, MEMFLAGS F>
300 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
301   write_synchonize_on_visible_epoch(Thread* thread)
302 {
303   assert(_resize_lock_owner == thread, "Re-size lock not held");
304   OrderAccess::fence(); // Prevent below load from floating up.
305   // If no reader saw this version we can skip write_synchronize.
306   if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
307     return;
308   }
309   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
310   // We mark this/next version that we are synchronizing for as not published.
311   // A reader will zero this flag if it reads this/next version.
312   OrderAccess::release_store(&_invisible_epoch, thread);
313   GlobalCounter::write_synchronize();
314 }
315 
316 template <typename VALUE, typename CONFIG, MEMFLAGS F>
317 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
318   try_resize_lock(Thread* locker)
319 {
320   if (_resize_lock->try_lock()) {
321     if (_resize_lock_owner != NULL) {
322       assert(locker != _resize_lock_owner, "Already own lock");
323       // We got mutex but internal state is locked.
324       _resize_lock->unlock();
325       return false;
326     }
327   } else {
328     return false;
329   }
330   _invisible_epoch = 0;
331   _resize_lock_owner = locker;
332   return true;
333 }
334 
335 template <typename VALUE, typename CONFIG, MEMFLAGS F>
336 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
337   lock_resize_lock(Thread* locker)
338 {
339   size_t i = 0;
340   // If the lock is held by some other thread, the chances that it is released
341   // quickly are low, so we prefer yielding.
342   SpinYield yield(1, 512);
343   do {
344     _resize_lock->lock_without_safepoint_check();
345     // If the holder of the lock dropped the mutex for a safepoint, the mutex
346     // might be unlocked while _resize_lock_owner still contains the owner.
347     if (_resize_lock_owner != NULL) {
348       assert(locker != _resize_lock_owner, "Already own lock");
349       // We got mutex but internal state is locked.
350       _resize_lock->unlock();
351       yield.wait();
352     } else {
353       break;
354     }
355   } while(true);
356   _resize_lock_owner = locker;
357   _invisible_epoch = 0;
358 }
359 
360 template <typename VALUE, typename CONFIG, MEMFLAGS F>
361 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
362   unlock_resize_lock(Thread* locker)
363 {
364   _invisible_epoch = 0;
365   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
366   _resize_lock_owner = NULL;
367   _resize_lock->unlock();
368 }
369 
370 template <typename VALUE, typename CONFIG, MEMFLAGS F>
371 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
372   free_nodes()
373 {
374   // We assume we are not MT during freeing.
375   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
376     Bucket* bucket = _table->get_buckets() + node_it;
377     Node* node = bucket->first();
378     while (node != NULL) {
379       Node* free_node = node;
380       node = node->next();
381       Node::destroy_node(free_node);
382     }
383   }
384 }
385 
386 template <typename VALUE, typename CONFIG, MEMFLAGS F>
387 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
388 ConcurrentHashTable<VALUE, CONFIG, F>::
389   get_table() const
390 {
391   return OrderAccess::load_acquire(&_table);
392 }
393 
394 template <typename VALUE, typename CONFIG, MEMFLAGS F>
395 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
396 ConcurrentHashTable<VALUE, CONFIG, F>::
397   get_new_table() const
398 {
399   return OrderAccess::load_acquire(&_new_table);
400 }
401 
402 template <typename VALUE, typename CONFIG, MEMFLAGS F>
403 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
404 ConcurrentHashTable<VALUE, CONFIG, F>::
405   set_table_from_new()
406 {
407   InternalTable* old_table = _table;
408   // Publish the new table.
409   OrderAccess::release_store(&_table, _new_table);
410   // All must see this.
411   GlobalCounter::write_synchronize();
412   // _new_table not read any more.
413   _new_table = NULL;
414   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
415   return old_table;
416 }
417 
418 template <typename VALUE, typename CONFIG, MEMFLAGS F>
419 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
420   internal_grow_range(Thread* thread, size_t start, size_t stop)
421 {
422   assert(stop <= _table->_size, "Outside backing array");
423   assert(_new_table != NULL, "Grow not properly set up before start");
424   // The state is also copied here. Hence all buckets in the new table will be
425   // locked. We call the sibling buckets even/odd, where the even bucket has the
426   // new high bit set to 0 and the odd bucket has it set to 1.
427   for (size_t even_index = start; even_index < stop; even_index++) {
428     Bucket* bucket = _table->get_bucket(even_index);
429 
430     bucket->lock();
431 
432     size_t odd_index = even_index + _table->_size;
433     _new_table->get_buckets()[even_index] = *bucket;
434     _new_table->get_buckets()[odd_index] = *bucket;
435 
436     // Moves lockers to the new table, where they will wait until the unlock() below.
437     bucket->redirect(); /* Must release stores above */
438 
439     // When this is done we have separated the nodes into corresponding buckets
440     // in new table.
441     if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
442       // If bucket is empty, unzip does nothing.
443       // We must make sure readers go to new table before we poison the bucket.
444       DEBUG_ONLY(GlobalCounter::write_synchronize();)
445     }
446 
447     // Unlock for writes into the new table buckets.
448     _new_table->get_bucket(even_index)->unlock();
449     _new_table->get_bucket(odd_index)->unlock();
450 
451     DEBUG_ONLY(
452        bucket->release_assign_node_ptr(
453           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
454     )
455   }
456 }
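
// A concrete example of the even/odd split above (illustrative, assuming the
// bucket index is hash & _hash_mask, as set up in the InternalTable
// constructor): growing from log2_size 5 (32 buckets, mask 0x1F) to 6
// (64 buckets, mask 0x3F), old bucket 10 splits into new buckets 10 (even)
// and 10 + 32 = 42 (odd). A node with hash 0x0A (new bit 5 == 0) stays in
// bucket 10, while a node with hash 0x2A (0x2A & 0x1F == 10, new bit 5 == 1)
// ends up in bucket 42; unzip_bucket() below separates the shared chain
// accordingly.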
457 
458 template <typename VALUE, typename CONFIG, MEMFLAGS F>
459 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
460 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
461   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
462 {
463   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
464   assert(bucket->is_locked(), "Must be locked.");
465   Node* const volatile * rem_n_prev = bucket->first_ptr();
466   Node* rem_n = bucket->first();
467   bool have_dead = false;
468   while (rem_n != NULL) {
469     if (lookup_f.equals(rem_n->value(), &have_dead)) {
470       bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
471       break;
472     } else {
473       rem_n_prev = rem_n->next_ptr();
474       rem_n = rem_n->next();
475     }
476   }
477 
478   bucket->unlock();
479 
480   if (rem_n == NULL) {
481     return false;
482   }
483   // Publish the deletion.
484   GlobalCounter::write_synchronize();
485   delete_f(rem_n->value());
486   Node::destroy_node(rem_n);
487   return true;
488 }
489 
490 template <typename VALUE, typename CONFIG, MEMFLAGS F>
491 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
492 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
493   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
494                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
495 {
496   // Here we hold the resize lock, so the table is SMR safe, and there is no new
497   // table. This can be done in parallel if we want.
498   assert((is_mt && _resize_lock_owner != NULL) ||
499          (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
500   Node* ndel[BULK_DELETE_LIMIT];
501   InternalTable* table = get_table();
502   assert(start_idx < stop_idx, "Must be");
503   assert(stop_idx <= _table->_size, "Must be");
504   // Here we handle the critical section manually, since we don't want to take
505   // the cost of locking the bucket if there is nothing to delete. But we can
506   // have concurrent single deletes. The _invisible_epoch can only be used by
507   // the owner of _resize_lock, which is us here. Therefore we must not change
508   // it on our own read-side.
509   GlobalCounter::critical_section_begin(thread);
510   for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
511     Bucket* bucket = table->get_bucket(bucket_it);
512     Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
513                               table->get_bucket(bucket_it+1) : NULL;
514 
515     if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
516         have_deletable(bucket, eval_f, prefetch_bucket)) {
517         // Nothing to remove in this bucket.
518         continue;
519     }
520 
521     GlobalCounter::critical_section_end(thread);
522     // We left critical section but the bucket cannot be removed while we hold
523     // the _resize_lock.
524     bucket->lock();
525     size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
526     bucket->unlock();
527     if (is_mt) {
528       GlobalCounter::write_synchronize();
529     } else {
530       write_synchonize_on_visible_epoch(thread);
531     }
532     for (size_t node_it = 0; node_it < nd; node_it++) {
533       del_f(ndel[node_it]->value());
534       Node::destroy_node(ndel[node_it]);
535       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
536     }
537     GlobalCounter::critical_section_begin(thread);
538   }
539   GlobalCounter::critical_section_end(thread);
540 }
541 
542 template <typename VALUE, typename CONFIG, MEMFLAGS F>
543 template <typename LOOKUP_FUNC>
544 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
545   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
546 {
547   size_t dels = 0;
548   Node* ndel[BULK_DELETE_LIMIT];
549   Node* const volatile * rem_n_prev = bucket->first_ptr();
550   Node* rem_n = bucket->first();
551   while (rem_n != NULL) {
552     bool is_dead = false;
553     lookup_f.equals(rem_n->value(), &is_dead);
554     if (is_dead) {
555       ndel[dels++] = rem_n;
556       Node* next_node = rem_n->next();
557       bucket->release_assign_node_ptr(rem_n_prev, next_node);
558       rem_n = next_node;
559       if (dels == BULK_DELETE_LIMIT) {
560         break;
561       }
562     } else {
563       rem_n_prev = rem_n->next_ptr();
564       rem_n = rem_n->next();
565     }
566   }
567   if (dels > 0) {
568     GlobalCounter::write_synchronize();
569     for (size_t node_it = 0; node_it < dels; node_it++) {
570       Node::destroy_node(ndel[node_it]);
571       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
572     }
573   }
574 }
575 
576 template <typename VALUE, typename CONFIG, MEMFLAGS F>
577 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
578 ConcurrentHashTable<VALUE, CONFIG, F>::
579   get_bucket(uintx hash) const
580 {
581   InternalTable* table = get_table();
582   Bucket* bucket = get_bucket_in(table, hash);
583   if (bucket->have_redirect()) {
584     table = get_new_table();
585     bucket = get_bucket_in(table, hash);
586   }
587   return bucket;
588 }
589 
590 template <typename VALUE, typename CONFIG, MEMFLAGS F>
591 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
592 ConcurrentHashTable<VALUE, CONFIG, F>::
593   get_bucket_locked(Thread* thread, const uintx hash)
594 {
595   Bucket* bucket;
596   int i = 0;
597   // SpinYield would be unfair here
598   while(true) {
599     {
600       // We need a critical section to protect the table itself. But if we fail
601       // we must leave the critical section, otherwise we would deadlock.
602       ScopedCS cs(thread, this);
603       bucket = get_bucket(hash);
604       if (bucket->trylock()) {
605         break; /* ends critical section */
606       }
607     } /* ends critical section */
608     if ((++i) == SPINPAUSES_PER_YIELD) {
609       // On a contemporary OS, yielding will give the CPU to another runnable
610       // thread if there is no spare CPU available.
611       os::naked_yield();
612       i = 0;
613     } else {
614       SpinPause();
615     }
616   }
617   return bucket;
618 }
619 
620 // Always called within critical section
621 template <typename VALUE, typename CONFIG, MEMFLAGS F>
622 template <typename LOOKUP_FUNC>
623 typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
624 ConcurrentHashTable<VALUE, CONFIG, F>::
625   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
626            bool* have_dead, size_t* loops) const
627 {
628   size_t loop_count = 0;
629   Node* node = bucket->first();
630   while (node != NULL) {
631     bool is_dead = false;
632     ++loop_count;
633     if (lookup_f.equals(node->value(), &is_dead)) {
634       break;
635     }
636     if (is_dead && !(*have_dead)) {
637       *have_dead = true;
638     }
639     node = node->next();
640   }
641   if (loops != NULL) {
642     *loops = loop_count;
643   }
644   return node;
645 }
646 
647 template <typename VALUE, typename CONFIG, MEMFLAGS F>
648 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
649   unzip_bucket(Thread* thread, InternalTable* old_table,
650                InternalTable* new_table, size_t even_index, size_t odd_index)
651 {
652   Node* aux = old_table->get_bucket(even_index)->first();
653   if (aux == NULL) {
654     // This is an empty bucket and in debug we poison the first ptr in the bucket.
655     // Therefore we must make sure no readers are looking at this bucket.
656     // If we don't do a write_synch here, caller must do it.
657     return false;
658   }
659   Node* delete_me = NULL;
660   Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
661   Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
662   while (aux != NULL) {
663     bool dead_hash = false;
664     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
665     Node* aux_next = aux->next();
666     if (dead_hash) {
667       delete_me = aux;
668       // This item is dead, move both lists to the next node.
669       new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
670                                                                 aux_next);
671       new_table->get_bucket(even_index)->release_assign_node_ptr(even,
672                                                                  aux_next);
673     } else {
674       size_t aux_index = bucket_idx_hash(new_table, aux_hash);
675       if (aux_index == even_index) {
676         // This node is even, so move the odd chain past it (to aux's next).
677         new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
678                                                                   aux_next);
679         // Keep in even list
680         even = aux->next_ptr();
681       } else if (aux_index == odd_index) {
682         // This node is odd, so move the even chain past it (to aux's next).
683         new_table->get_bucket(even_index)->release_assign_node_ptr(even,
684                                                                    aux_next);
685         // Keep in odd list
686         odd = aux->next_ptr();
687       } else {
688         fatal("aux_index does not match even or odd indices");
689       }
690     }
691     aux = aux_next;
692 
693     // We can only move one pointer at a time, otherwise a reader might be moved
694     // to the wrong chain, e.g. looking for an even hash value but moved to the
695     // odd bucket chain.
696     write_synchonize_on_visible_epoch(thread);
697     if (delete_me != NULL) {
698       Node::destroy_node(delete_me);
699       delete_me = NULL;
700     }
701   }
702   return true;
703 }
704 
705 template <typename VALUE, typename CONFIG, MEMFLAGS F>
706 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
707   internal_shrink_prolog(Thread* thread, size_t log2_size)
708 {
709   if (!try_resize_lock(thread)) {
710     return false;
711   }
712   assert(_resize_lock_owner == thread, "Re-size lock not held");
713   if (_table->_log2_size == _log2_start_size ||
714       _table->_log2_size <= log2_size) {
715     unlock_resize_lock(thread);
716     return false;
717   }
718   _new_table = new InternalTable(_table->_log2_size - 1);
719   return true;
720 }
721 
722 template <typename VALUE, typename CONFIG, MEMFLAGS F>
723 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
724   internal_shrink_epilog(Thread* thread)
725 {
726   assert(_resize_lock_owner == thread, "Re-size lock not held");
727 
728   InternalTable* old_table = set_table_from_new();
729   _size_limit_reached = false;
730   unlock_resize_lock(thread);
731 #ifdef ASSERT
732   for (size_t i = 0; i < old_table->_size; i++) {
733     assert(old_table->get_bucket(i)->first() == POISON_PTR,
734            "No poison found");
735   }
736 #endif
737   // ABA safe, old_table not visible to any other threads.
738   delete old_table;
739 }
740 
741 template <typename VALUE, typename CONFIG, MEMFLAGS F>
742 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
743   internal_shrink_range(Thread* thread, size_t start, size_t stop)
744 {
745   // The state is also copied here.
746   // Hence all buckets in new table will be locked.
747   for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
748     size_t even_hash_index = bucket_it; // High bit 0
749     size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1
750 
751     Bucket* b_old_even = _table->get_bucket(even_hash_index);
752     Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);
753 
754     b_old_even->lock();
755     b_old_odd->lock();
756 
757     _new_table->get_buckets()[bucket_it] = *b_old_even;
758 
759     // Put chains together.
760     _new_table->get_bucket(bucket_it)->
761       release_assign_last_node_next(*(b_old_odd->first_ptr()));
762 
763     b_old_even->redirect();
764     b_old_odd->redirect();
765 
766     write_synchonize_on_visible_epoch(thread);
767 
768     // Unlock for writes into new smaller table.
769     _new_table->get_bucket(bucket_it)->unlock();
770 
771     DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
772                                                    (Node*)POISON_PTR);)
773     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
774                                                   (Node*)POISON_PTR);)
775   }
776 }
777 
778 template <typename VALUE, typename CONFIG, MEMFLAGS F>
779 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
780   internal_shrink(Thread* thread, size_t log2_size)
781 {
782   if (!internal_shrink_prolog(thread, log2_size)) {
783     assert(_resize_lock_owner != thread, "Re-size lock held");
784     return false;
785   }
786   assert(_resize_lock_owner == thread, "Should be locked by me");
787   internal_shrink_range(thread, 0, _new_table->_size);
788   internal_shrink_epilog(thread);
789   assert(_resize_lock_owner != thread, "Re-size lock held");
790   return true;
791 }
792 
793 template <typename VALUE, typename CONFIG, MEMFLAGS F>
794 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
795   internal_grow_prolog(Thread* thread, size_t log2_size)
796 {
797   // We only do this double-checking of _size_limit_reached/is_max_size_reached()
798   // in the grow path, since grow means a high load on the table
799   // while shrink means a low load.
800   if (is_max_size_reached()) {
801     return false;
802   }
803   if (!try_resize_lock(thread)) {
804     // Either we have an ongoing resize or an operation which doesn't want us
805     // to resize now.
806     return false;
807   }
808   if (is_max_size_reached() || _table->_log2_size >= log2_size) {
809     unlock_resize_lock(thread);
810     return false;
811   }
812 
813   _new_table = new InternalTable(_table->_log2_size + 1);
814 
815   if (_new_table->_log2_size == _log2_size_limit) {
816     _size_limit_reached = true;
817   }
818 
819   return true;
820 }
821 
822 template <typename VALUE, typename CONFIG, MEMFLAGS F>
823 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
824   internal_grow_epilog(Thread* thread)
825 {
826   assert(_resize_lock_owner == thread, "Should be locked");
827 
828   InternalTable* old_table = set_table_from_new();
829   unlock_resize_lock(thread);
830 #ifdef ASSERT
831   for (size_t i = 0; i < old_table->_size; i++) {
832     assert(old_table->get_bucket(i)->first() == POISON_PTR,
833            "No poison found");
834   }
835 #endif
836   // ABA safe, old_table not visible to any other threads.
837   delete old_table;
838 }
839 
840 template <typename VALUE, typename CONFIG, MEMFLAGS F>
841 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
842   internal_grow(Thread* thread, size_t log2_size)
843 {
844   if (!internal_grow_prolog(thread, log2_size)) {
845     assert(_resize_lock_owner != thread, "Re-size lock held");
846     return false;
847   }
848   assert(_resize_lock_owner == thread, "Should be locked by me");
849   internal_grow_range(thread, 0, _table->_size);
850   internal_grow_epilog(thread);
851   assert(_resize_lock_owner != thread, "Re-size lock held");
852   return true;
853 }
854 
855 // Always called within critical section
856 template <typename VALUE, typename CONFIG, MEMFLAGS F>
857 template <typename LOOKUP_FUNC>
858 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
859   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
860 {
861   bool clean = false;
862   size_t loops = 0;
863   VALUE* ret = NULL;
864 
865   const Bucket* bucket = get_bucket(lookup_f.get_hash());
866   Node* node = get_node(bucket, lookup_f, &clean, &loops);
867   if (node != NULL) {
868     ret = node->value();
869   }
870   if (grow_hint != NULL) {
871     *grow_hint = loops > _grow_hint;
872   }
873 
874   return ret;
875 }
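
// The LOOKUP_FUNC used by internal_get()/get_node() must provide a hash and
// an equality test. A minimal sketch of such a functor (hypothetical, for a
// table of MyValue looked up by an int key):
//
//   class MyLookup : public StackObj {
//     int _key;
//    public:
//     MyLookup(int key) : _key(key) {}
//     uintx get_hash() const { return (uintx)_key * 31; }  // any stable hash
//     bool equals(MyValue* value, bool* is_dead) const {
//       *is_dead = false;               // no lazily dead entries in this example
//       return value->key() == _key;
//     }
//   };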
876 
877 template <typename VALUE, typename CONFIG, MEMFLAGS F>
878 template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
879 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
880   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
881                   CALLBACK_FUNC& callback, bool* grow_hint)
882 {
883   bool ret = false;
884   bool clean = false;
885   bool locked;
886   size_t loops = 0;
887   size_t i = 0;
888   Node* new_node = NULL;
889   uintx hash = lookup_f.get_hash();
890   while (true) {
891     {
892       ScopedCS cs(thread, this); /* protects the table/bucket */
893       Bucket* bucket = get_bucket(hash);
894 
895       Node* first_at_start = bucket->first();
896       Node* old = get_node(bucket, lookup_f, &clean, &loops);
897       if (old == NULL) {
898         // No duplicate found.
899         if (new_node == NULL) {
900           new_node = Node::create_node(value_f(), first_at_start);
901         } else {
902           new_node->set_next(first_at_start);
903         }
904         if (bucket->cas_first(new_node, first_at_start)) {
905           callback(true, new_node->value());
906           new_node = NULL;
907           ret = true;
908           break; /* leave critical section */
909         }
910         // CAS failed; we must leave the critical section and retry.
911         locked = bucket->is_locked();
912       } else {
913         // There is a duplicate.
914         callback(false, old->value());
915         break; /* leave critical section */
916       }
917     } /* leave critical section */
918     i++;
919     if (locked) {
920       os::naked_yield();
921     } else {
922       SpinPause();
923     }
924   }
925 
926   if (new_node != NULL) {
927     // CAS failed and a duplicate was inserted, we must free this node.
928     Node::destroy_node(new_node);
929   } else if (i == 0 && clean) {
930     // We only do cleaning on fast inserts.
931     Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
932     assert(bucket->is_locked(), "Must be locked.");
933     delete_in_bucket(thread, bucket, lookup_f);
934     bucket->unlock();
935   }
936 
937   if (grow_hint != NULL) {
938     *grow_hint = loops > _grow_hint;
939   }
940 
941   return ret;
942 }
943 
944 template <typename VALUE, typename CONFIG, MEMFLAGS F>
945 template <typename FUNC>
946 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
947   visit_nodes(Bucket* bucket, FUNC& visitor_f)
948 {
949   Node* current_node = bucket->first();
950   while (current_node != NULL) {
951     if (!visitor_f(current_node->value())) {
952       return false;
953     }
954     current_node = current_node->next();
955   }
956   return true;
957 }
958 
959 template <typename VALUE, typename CONFIG, MEMFLAGS F>
960 template <typename FUNC>
961 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
962   do_scan_locked(Thread* thread, FUNC& scan_f)
963 {
964   assert(_resize_lock_owner == thread, "Re-size lock not held");
965   // We could do a critical section over the entire loop, but that would block
966   // updates for a long time. Instead we choose to block resizes.
967   InternalTable* table = get_table();
968   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
969     ScopedCS cs(thread, this);
970     if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
971       break; /* ends critical section */
972     }
973   } /* ends critical section */
974 }
975 
976 template <typename VALUE, typename CONFIG, MEMFLAGS F>
977 template <typename EVALUATE_FUNC>
978 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
979   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
980                      size_t num_del, Node** ndel)
981 {
982   size_t dels = 0;
983   Node* const volatile * rem_n_prev = bucket->first_ptr();
984   Node* rem_n = bucket->first();
985   while (rem_n != NULL) {
986     if (eval_f(rem_n->value())) {
987       ndel[dels++] = rem_n;
988       Node* next_node = rem_n->next();
989       bucket->release_assign_node_ptr(rem_n_prev, next_node);
990       rem_n = next_node;
991       if (dels == num_del) {
992         break;
993       }
994     } else {
995       rem_n_prev = rem_n->next_ptr();
996       rem_n = rem_n->next();
997     }
998   }
999   return dels;
1000 }
1001 
1002 // Constructor
1003 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1004 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1005   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
1006     : _new_table(NULL), _log2_start_size(log2size),
1007        _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
1008        _size_limit_reached(false), _resize_lock_owner(NULL),
1009        _invisible_epoch(0)
1010 {
1011   _resize_lock =
1012     new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
1013               Monitor::_safepoint_check_never);
1014   _table = new InternalTable(log2size);
1015   assert(log2size_limit >= log2size, "bad ergo");
1016   _size_limit_reached = _table->_log2_size == _log2_size_limit;
1017 }
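
// Construction sketch (illustrative, with a hypothetical MyValue/MyConfig):
// a table that starts with 2^16 buckets, may grow up to 2^24 buckets, and
// sets grow_hint so that a lookup walking more than 4 nodes reports a grow
// hint (see internal_get() above):
//
//   ConcurrentHashTable<MyValue, MyConfig, mtInternal>* cht =
//     new ConcurrentHashTable<MyValue, MyConfig, mtInternal>(16, 24, 4);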
1018 
1019 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1020 inline ConcurrentHashTable<VALUE, CONFIG, F>::
1021   ~ConcurrentHashTable()
1022 {
1023   delete _resize_lock;
1024   free_nodes();
1025   delete _table;
1026 }
1027 
1028 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1029 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
1030   get_size_log2(Thread* thread)
1031 {
1032   ScopedCS cs(thread, this);
1033   return _table->_log2_size;
1034 }
1035 
1036 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1037 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1038   shrink(Thread* thread, size_t size_limit_log2)
1039 {
1040   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
1041   bool ret = internal_shrink(thread, tmp);
1042   return ret;
1043 }
1044 
1045 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1046 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1047   grow(Thread* thread, size_t size_limit_log2)
1048 {
1049   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
1050   return internal_grow(thread, tmp);
1051 }
1052 
1053 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1054 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
1055 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1056   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
1057 {
1058   bool ret = false;
1059   ScopedCS cs(thread, this);
1060   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1061   if (val != NULL) {
1062     found_f(val);
1063     ret = true;
1064   }
1065   return ret;
1066 }
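
// Usage sketch for get() (illustrative, reusing the hypothetical MyLookup
// functor sketched after internal_get() above). The FOUND_FUNC is called
// with a VALUE* while still inside the critical section:
//
//   struct CopyFound {
//     MyValue _result;
//     void operator()(MyValue* value) { _result = *value; }
//   } found_f;
//   MyLookup lookup(key);
//   bool hit = cht->get(thread, lookup, found_f, NULL);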
1067 
1068 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1069 template <typename LOOKUP_FUNC>
1070 inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
1071   get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
1072 {
1073   ScopedCS cs(thread, this);
1074   VALUE* val = internal_get(thread, lookup_f, grow_hint);
1075   return val != NULL ? *val : CONFIG::notfound();
1076 }
1077 
1078 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1079 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1080   unsafe_insert(const VALUE& value) {
1081   bool dead_hash = false;
1082   size_t hash = CONFIG::get_hash(value, &dead_hash);
1083   if (dead_hash) {
1084     return false;
1085   }
1086   // This is an unsafe operation.
1087   InternalTable* table = get_table();
1088   Bucket* bucket = get_bucket_in(table, hash);
1089   assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
1090   Node* new_node = Node::create_node(value, bucket->first());
1091   if (!bucket->cas_first(new_node, bucket->first())) {
1092     assert(false, "bad");
1093   }
1094   return true;
1095 }
1096 
1097 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1098 template <typename SCAN_FUNC>
1099 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1100   try_scan(Thread* thread, SCAN_FUNC& scan_f)
1101 {
1102   if (!try_resize_lock(thread)) {
1103     return false;
1104   }
1105   do_scan_locked(thread, scan_f);
1106   unlock_resize_lock(thread);
1107   return true;
1108 }
1109 
1110 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1111 template <typename SCAN_FUNC>
1112 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1113   do_scan(Thread* thread, SCAN_FUNC& scan_f)
1114 {
1115   assert(!SafepointSynchronize::is_at_safepoint(),
1116          "must be outside a safepoint");
1117   assert(_resize_lock_owner != thread, "Re-size lock held");
1118   lock_resize_lock(thread);
1119   do_scan_locked(thread, scan_f);
1120   unlock_resize_lock(thread);
1121   assert(_resize_lock_owner != thread, "Re-size lock held");
1122 }
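
// Scan usage sketch (illustrative, hypothetical MyValue): the SCAN_FUNC is
// called with a VALUE* for every node and returns false to stop early (see
// visit_nodes() above). do_scan() takes the resize lock, so it blocks
// resizes but not concurrent readers or single inserts/removes:
//
//   struct CountNodes {
//     size_t _count;
//     CountNodes() : _count(0) {}
//     bool operator()(MyValue* value) { _count++; return true; }  // keep scanning
//   } counter;
//   cht->do_scan(thread, counter);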
1123 
1124 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1125 template <typename SCAN_FUNC>
1126 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1127   do_safepoint_scan(SCAN_FUNC& scan_f)
1128 {
1129   // We only allow this method to be used during a safepoint.
1130   assert(SafepointSynchronize::is_at_safepoint(),
1131          "must only be called in a safepoint");
1132   assert(Thread::current()->is_VM_thread(),
1133          "should be in vm thread");
1134 
1135   // Here we skip protection, since at a safepoint no other thread can use
1136   // this table at the same time.
1137   InternalTable* table = get_table();
1138   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1139     Bucket* bucket = table->get_bucket(bucket_it);
1140     // If the bucket has a redirect, the items will be in the new table.
1141     // We must visit them there since the new table will contain any
1142     // concurrent inserts done after this bucket was resized.
1143     // If the bucket doesn't have the redirect flag, all items are in this table.
1144     if (!bucket->have_redirect()) {
1145       if (!visit_nodes(bucket, scan_f)) {
1146         return;
1147       }
1148     } else {
1149       assert(bucket->is_locked(), "Bucket must be locked.");
1150     }
1151   }
1152   // If there is a paused resize we also need to visit the already resized items.
1153   table = get_new_table();
1154   if (table == NULL) {
1155     return;
1156   }
1157   DEBUG_ONLY(if (table == POISON_PTR) { return; })
1158   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1159     Bucket* bucket = table->get_bucket(bucket_it);
1160     assert(!bucket->is_locked(), "Bucket must be unlocked.");
1161     if (!visit_nodes(bucket, scan_f)) {
1162       return;
1163     }
1164   }
1165 }
1166 
1167 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1168 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1169 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1170   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1171 {
1172   if (!try_resize_lock(thread)) {
1173     return false;
1174   }
1175   do_bulk_delete_locked(thread, eval_f, del_f);
1176   unlock_resize_lock(thread);
1177   assert(_resize_lock_owner != thread, "Re-size lock held");
1178   return true;
1179 }
1180 
1181 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1182 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
1183 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1184   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
1185 {
1186   assert(!SafepointSynchronize::is_at_safepoint(),
1187          "must be outside a safepoint");
1188   lock_resize_lock(thread);
1189   do_bulk_delete_locked(thread, eval_f, del_f);
1190   unlock_resize_lock(thread);
1191 }
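
// Bulk-delete usage sketch (illustrative, hypothetical MyValue): eval_f picks
// the nodes to remove and del_f is called for each removed value after the
// write synchronization (see do_bulk_delete_locked_for() above). Must not be
// called at a safepoint:
//
//   struct IsStale {
//     bool operator()(MyValue* value) { return value->is_stale(); }
//   } eval_f;
//   struct FreeValue {
//     void operator()(MyValue* value) { /* release what value owns */ }
//   } del_f;
//   cht->bulk_delete(thread, eval_f, del_f);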
1192 
1193 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1194 template <typename VALUE_SIZE_FUNC>
1195 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
1196   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
1197                 outputStream* st, const char* table_name)
1198 {
1199   NumberSeq summary;
1200   size_t literal_bytes = 0;
1201   if (!try_resize_lock(thread)) {
1202     st->print_cr("statistics unavailable at this moment");
1203     return;
1204   }
1205 
1206   InternalTable* table = get_table();
1207   for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
1208     ScopedCS cs(thread, this);
1209     size_t count = 0;
1210     Bucket* bucket = table->get_bucket(bucket_it);
1211     if (bucket->have_redirect() || bucket->is_locked()) {
1212         continue;
1213     }
1214     Node* current_node = bucket->first();
1215     while (current_node != NULL) {
1216       ++count;
1217       literal_bytes += vs_f(current_node->value());
1218       current_node = current_node->next();
1219     }
1220     summary.add((double)count);
1221   }
1222 
1223   double num_buckets = summary.num();
1224   double num_entries = summary.sum();
1225 
1226   size_t bucket_bytes = num_buckets * sizeof(Bucket);
1227   size_t entry_bytes  = num_entries * sizeof(Node);
1228   size_t total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
1229 
1230   size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
1231   size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
1232 
1233   st->print_cr("%s statistics:", table_name);
1234   st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
1235                " bytes, each " SIZE_FORMAT,
1236                (size_t)num_buckets, bucket_bytes,  bucket_size);
1237   st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
1238                " bytes, each " SIZE_FORMAT,
1239                (size_t)num_entries, entry_bytes,   entry_size);
1240   if (literal_bytes != 0) {
1241     double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
1242     st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
1243                  " bytes, avg %7.3f",
1244                  (size_t)num_entries, literal_bytes, literal_avg);
1245   }
1246   st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", ""
1247                , total_bytes);
1248   st->print_cr("Average bucket size     : %9.3f", summary.avg());
1249   st->print_cr("Variance of bucket size : %9.3f", summary.variance());
1250   st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
1251   st->print_cr("Maximum bucket size     : %9" PRIuPTR,
1252                (size_t)summary.maximum());
1253   unlock_resize_lock(thread);
1254 }
1255 
1256 template <typename VALUE, typename CONFIG, MEMFLAGS F>
1257 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
1258   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
1259 {
1260   if (!try_resize_lock(thread)) {
1261     return false;
1262   }
1263   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
1264   for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
1265     Bucket* bucket = _table->get_bucket(bucket_it);
1266     assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
1267     while (bucket->first() != NULL) {
1268       Node* move_node = bucket->first();
1269       bool ok = bucket->cas_first(move_node->next(), move_node);
1270       assert(ok, "Uncontended cas must work");
1271       bool dead_hash = false;
1272       size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
1273       if (!dead_hash) {
1274         Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
1275         assert(!bucket->have_redirect() && !bucket->is_locked(), "No bit should be present");
1276         move_node->set_next(insert_bucket->first());
1277         ok = insert_bucket->cas_first(move_node, insert_bucket->first());
1278         assert(ok, "Uncontended cas must work");
1279       }
1280     }
1281   }
1282   unlock_resize_lock(thread);
1283   return true;
1284 }
1285 
1286 #endif // include guard
1287