/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5  = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

// Node
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Node::next() const
{
  return Atomic::load_acquire(&_next);
}

// Bucket
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first_raw() const
{
  return Atomic::load_acquire(&_first);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  Atomic::release_store(tmp, clear_set_state(node, *dst));
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(Atomic::load_acquire(&_first));
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_last_node_next(
     typename ConcurrentHashTable<CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<CONFIG, F>::Node* expect
                    )
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(&_first, expect, node) == expect) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  Atomic::release_store(&_first, clear_state(first()));
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  // Use placement new for each element instead of new[] which could use more
  // memory than allocated.
  for (size_t i = 0; i < _size; ++i) {
    new (_buckets + i) Bucket();
  }
}
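
// A minimal sketch (illustration only, not part of the implementation) of the
// bit math the constructor above relies on, assuming a hypothetical
// log2_size of 5:
//
//   size_t log2_size = 5;                              // assumed example value
//   size_t size      = ((size_t)1ul) << log2_size;     // 2^5 = 32 buckets
//   size_t hash_mask = ~(~((size_t)0) << log2_size);   // 0x1f, keeps the low 5 bits
//   size_t bucket_index = some_hash & hash_mask;       // always < size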

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
    : _thread(thread),
      _cht(cht),
      _cs_context(GlobalCounter::critical_section_begin(_thread))
{
  // This version is published now.
  if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
    Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread, _cs_context);
}
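
// A minimal sketch of the read-side protocol the class above implements,
// assuming a hypothetical table instance named my_cht; it is not a copy of
// any caller in this file.
//
//   {
//     ScopedCS cs(Thread::current(), my_cht);   // enter read-side critical section
//     // Any Node* fetched from my_cht may be dereferenced here, since bulk
//     // deleters call write_synchronize() before freeing nodes.
//   }                                           // critical_section_end() runs here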

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

// HaveDeletables
template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes doing this prefetch gives around a 30% speed-up.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once.  May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (Atomic::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark this/next version that we are synchronizing on as not published.
  // A reader will zero this flag if it reads this/next version.
  Atomic::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chance that it is released
  // quickly is low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // may be unlocked while _resize_lock_owner still contains the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_table() const
{
  return Atomic::load_acquire(&_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_new_table() const
{
  return Atomic::load_acquire(&_new_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  Atomic::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. We call the siblings odd/even, where even has the high bit 0 and
  // odd has the high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Moves lockers to the new table, where they will wait until unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into corresponding buckets
    // in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}
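
// A small illustrative example of the even/odd split above (numbers assumed,
// not taken from any caller): when a 32-bucket table grows to 64 buckets, the
// chain in old bucket 3 is split between new bucket 3 (even sibling, high bit
// 0) and new bucket 3 + 32 = 35 (odd sibling, high bit 1), selected by the
// new, wider hash mask.
//
//   size_t even_index = 3;
//   size_t odd_index  = even_index + old_size;   // 35 when old_size == 32
//   // a node whose (hash & new_mask) == 35 goes to the odd bucket,
//   // one whose (hash & new_mask) == 3 stays in the even bucket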

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  JFR_ONLY(_stats_rate.remove();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we hold the resize lock, so the table is SMR safe and there is no new
  // table. This can be done in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // We handle the critical section manually here, since we don't want to take
  // the cost of locking a bucket if there is nothing to delete. But we can
  // have concurrent single deletes. The _invisible_epoch can only be used by
  // the owner of _resize_lock, which is us here. Therefore we should not
  // change it on our own read-side.
  GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
        // Nothing to remove in this bucket.
        continue;
    }

    GlobalCounter::critical_section_end(thread, cs_context);
    // We left the critical section, but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    cs_context = GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread, cs_context);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  assert(bucket->is_locked(), "Must be locked.");

  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself. But if we fail
      // we must leave the critical section, otherwise we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS yielding will give the CPU to another runnable
      // thread if there is no spare CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the bucket.
    // Therefore we must make sure no readers are looking at this bucket.
    // If we don't do a write_synchronize here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to next.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is an even node, so move the odd list to aux's next.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is an odd node, so move the even list to aux's next.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move one pointer, otherwise a reader might be moved to the
    // wrong chain. E.g. looking for an even hash value but getting moved to
    // the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}
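
// An illustrative walk-through of unzip_bucket() above (values assumed for
// the example, not taken from any caller): with an old 32-bucket table
// growing to 64 buckets, a chain in old bucket 3 ends up split like this,
// and after every single pointer move the loop calls
// write_synchonize_on_visible_epoch() so a concurrent reader never observes
// a node spliced into the wrong chain.
//
//   old bucket 3:  A(hash&63 == 3) -> B(hash&63 == 35) -> C(hash&63 == 3)
//   new bucket 3:  A -> C          (even sibling)
//   new bucket 35: B               (odd sibling)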

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put the chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double checking of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means a high load on the table while shrink
  // means a low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                  bool* grow_hint, bool* clean_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  uintx hash = lookup_f.get_hash();
  Node* new_node = Node::create_node(value, NULL);

  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);
      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        new_node->set_next(first_at_start);
        if (bucket->cas_first(new_node, first_at_start)) {
          JFR_ONLY(_stats_rate.add();)
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed, we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted, we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
    clean = false;
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  if (clean_hint != NULL) {
    *clean_hint = clean;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We can do a critical section over the entire loop but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_size_limit(log2size_limit),
       _log2_start_size(log2size), _grow_hint(grow_hint),
       _size_limit_reached(false), _resize_lock_owner(NULL),
       _invisible_epoch(0)
{
  _stats_rate = TableRateStatistics();
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
              Mutex::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}
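
// A minimal configuration sketch (hypothetical, for illustration only; the
// authoritative list of CONFIG requirements is in concurrentHashTable.hpp).
// This file itself relies on CONFIG::Value and CONFIG::get_hash(); the node
// allocation hooks below are assumed names routed through Node::create_node()
// and destroy_node().
//
//   class MyTableConfig : public AllStatic {
//    public:
//     typedef MyEntry* Value;                          // hypothetical payload type
//     static uintx get_hash(Value const& v, bool* is_dead) {
//       *is_dead = false;                              // entries never die in this sketch
//       return v->hash();                              // hypothetical accessor
//     }
//     // allocation hooks, names assumed:
//     static void* allocate_node(size_t size, Value const& v) { return AllocateHeap(size, mtInternal); }
//     static void free_node(void* memory, Value const& v) { FreeHeap(memory); }
//   };
//
//   static ConcurrentHashTable<MyTableConfig, mtInternal>* _my_table =
//     new ConcurrentHashTable<MyTableConfig, mtInternal>(SIZE_SMALL_LOG2, SIZE_BIG_LOG2, 4);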

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}
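
// A hedged usage sketch of get() with hypothetical functors; the LOOKUP_FUNC
// contract (get_hash() and equals()) is inferred from the calls made in this
// file, and all names below are assumptions for illustration.
//
//   class MyLookup : public StackObj {
//     const uintx _hash;
//     const int   _key;
//    public:
//     MyLookup(uintx hash, int key) : _hash(hash), _key(key) {}
//     uintx get_hash() const { return _hash; }
//     bool equals(MyEntry** value, bool* is_dead) {
//       *is_dead = false;
//       return (*value)->key() == _key;              // hypothetical accessor
//     }
//   };
//   struct MyFound {
//     void operator()(MyEntry** value) { /* use *value; still inside the read-side CS */ }
//   };
//
//   MyLookup lookup(hash_my_key(42), 42);            // hash_my_key is hypothetical
//   MyFound found;
//   bool present = _my_table->get(Thread::current(), lookup, found);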

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  JFR_ONLY(_stats_rate.add();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}
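
// A hedged sketch of a SCAN_FUNC; the only contract inferred from
// visit_nodes() above is operator()(VALUE*) returning false to stop the scan.
// All names below are hypothetical.
//
//   struct CountingScan {
//     size_t _count;
//     CountingScan() : _count(0) {}
//     bool operator()(MyEntry** value) {
//       _count++;
//       return true;              // keep scanning; return false to stop early
//     }
//   };
//
//   CountingScan scan;
//   _my_table->do_scan(Thread::current(), scan);   // blocks resizes, not readers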

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_safepoint_scan(SCAN_FUNC& scan_f)
{
  // We only allow this method to be used during a safepoint.
  assert(SafepointSynchronize::is_at_safepoint(),
         "must only be called in a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "should be in vm thread");

  // Here we skip protection,
  // thus no other thread may use this table at the same time.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    // If the bucket has a redirect the items will be in the new table.
    // We must visit them there since the new table will contain any
    // concurrent inserts done after this bucket was resized.
    // If the bucket doesn't have the redirect flag all items are in this table.
    if (!bucket->have_redirect()) {
      if (!visit_nodes(bucket, scan_f)) {
        return;
      }
    } else {
      assert(bucket->is_locked(), "Bucket must be locked.");
    }
  }
  // If there is a paused resize we also need to visit the already resized items.
  table = get_new_table();
  if (table == NULL) {
    return;
  }
  DEBUG_ONLY(if (table == POISON_PTR) { return; })
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    assert(!bucket->is_locked(), "Bucket must be unlocked.");
    if (!visit_nodes(bucket, scan_f)) {
      return;
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}
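
// A hedged sketch of the bulk-delete functors; the contracts inferred from
// do_bulk_delete_locked_for() above are eval_f(VALUE*) -> bool (true means
// delete) and del_f(VALUE*) called after write_synchronize but before the
// node is destroyed. All names below are hypothetical.
//
//   struct IsDeadEval {
//     bool operator()(MyEntry** value) { return (*value)->refcount() == 0; } // hypothetical accessor
//   };
//   struct ReleaseDel {
//     void operator()(MyEntry** value) { delete *value; }   // free the payload, not the Node
//   };
//
//   IsDeadEval eval;
//   ReleaseDel del;
//   if (!_my_table->try_bulk_delete(Thread::current(), eval, del)) {
//     // a resize or another bulk operation is in progress; retry later
//   }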

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
{
  if (!try_resize_lock(thread)) {
    return old;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  return ts;
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  ts.print(st, table_name);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(), "No state bits should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP