/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_HPP

// Assumed include set: CHeapObj/MEMFLAGS, the global counter used by the
// read-side critical sections, and basic type/assert definitions.
#include "memory/allocation.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalDefinitions.hpp"

// A mostly concurrent hash table where the read-side is wait-free, inserts are
// done with CAS and deletes mutually exclude each other on a per-bucket basis.
// VALUE is the type kept inside each Node and CONFIG contains hash and
// allocation methods. A CALLBACK_FUNC and LOOKUP_FUNC need to be provided for
// get and insert.
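// An illustrative sketch of the functor shapes this table works with. The
// VALUE_FUNC and CALLBACK_FUNC shapes mirror the NoOp functor below; the
// LOOKUP_FUNC contract (get_hash()/equals()) is an assumption based on the
// matching .inline.hpp implementation, and the names here are hypothetical:
//
//   struct MyLookup {
//     uintx get_hash() const;                          // hash of the searched-for key
//     bool equals(const VALUE* value, bool* is_dead);  // does *value match the key?
//   };
//   struct MyValueFunc { const VALUE& operator()(); };     // materializes the VALUE to insert
//   struct MyCallback  { void operator()(bool, VALUE*); }; // called with new or old value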

class Thread;

template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable : public CHeapObj<F> {
 private:
  // This is the internal node structure.
  // Only constructed with placement new from memory allocated with MEMFLAGS of
  // the InternalTable or user-defined memory.
  class Node {
   private:
    Node * volatile _next;
    VALUE _value;
   public:
    Node(const VALUE& value, Node* next = NULL)
      : _next(next), _value(value) {
      assert((((uintptr_t)this) & ((uintptr_t)0x3)) == 0,
             "Must be 4 byte aligned.");
    }

    Node* next() const;
    void set_next(Node* node)         { _next = node; }
    Node* const volatile * next_ptr() { return &_next; }

    VALUE* value()                    { return &_value; }

    // Creates a node.
    static Node* create_node(const VALUE& value, Node* next = NULL) {
      return new (CONFIG::allocate_node(sizeof(Node), value)) Node(value, next);
    }
    // Destroys a node.
    static void destroy_node(Node* node) {
      CONFIG::free_node((void*)node, node->_value);
    }

    void print_on(outputStream* st) const {};
    void print_value_on(outputStream* st) const {};
  };

  // Only constructed with placement new from an array allocated with MEMFLAGS
  // of InternalTable.
  class Bucket {
   private:

    // The state embedded in the two low bits of the first pointer is a
    // spinlock with 3 states: unlocked, locked and redirect. You must never
    // busy-spin on trylock() or call lock() without holding _resize_lock;
    // that would deadlock. Redirect can only be installed by the owner and is
    // the final state of a bucket.
    // The only two valid flows are:
    // unlocked -> locked -> unlocked
    // unlocked -> locked -> redirect
    // The locked state only applies to an updater.
    // Readers only check for redirect.
    Node * volatile _first;

    static const uintptr_t STATE_LOCK_BIT     = 0x1;
    static const uintptr_t STATE_REDIRECT_BIT = 0x2;
    static const uintptr_t STATE_MASK         = 0x3;
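    // For illustration (addresses are hypothetical): a _first value of 0x1000
    // is an unlocked bucket whose chain starts at the Node at 0x1000, 0x1001
    // is the same chain observed while an updater holds the bucket lock
    // (STATE_LOCK_BIT), and 0x1002 marks a redirect (STATE_REDIRECT_BIT) to
    // the new table. The 4 byte alignment asserted in Node's constructor
    // keeps these two low bits free.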

    // Get the first pointer unmasked.
    Node* first_raw() const;

    // Methods to manipulate the embedded state.
    static bool is_state(Node* node, uintptr_t bits) {
      return (bits & (uintptr_t)node) == bits;
    }

    static Node* set_state(Node* n, uintptr_t bits) {
      return (Node*)(bits | (uintptr_t)n);
    }

    static uintptr_t get_state(Node* node) {
      return (((uintptr_t)node) & STATE_MASK);
    }

    static Node* clear_state(Node* node) {
      return (Node*)(((uintptr_t)node) & (~(STATE_MASK)));
    }

    static Node* clear_set_state(Node* node, Node* state) {
      return (Node*)(((uintptr_t)clear_state(node)) ^ get_state(state));
    }

   public:
    // A bucket is only one pointer with the embedded state.
    Bucket() : _first(NULL) {};

    // Get the first pointer unmasked.
    Node* first() const;

    // Get a pointer to the const first pointer. Do not dereference this
    // pointer, the pointer pointed to _may_ contain an embedded state. Such a
    // pointer should only be used as input to release_assign_node_ptr.
    Node* const volatile * first_ptr() { return &_first; }

    // This is the only place where a pointer to a Node pointer that potentially
    // is _first should be changed. Otherwise we destroy the embedded state. We
    // only give out pointers to const Node pointers to avoid accidental
    // assignment, thus here we must cast the const part away. The method is not
    // static due to an assert.
    void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;

    // This method assigns this bucket's last Node's next ptr to the input Node.
    void release_assign_last_node_next(Node* node);

    // Setting the first pointer must be done with CAS.
    bool cas_first(Node *node, Node* expect);

    // Returns true if this bucket is redirecting to a new table.
    // Redirect is a terminal state and will never change.
    bool have_redirect() const;

    // Return true if this bucket is locked for updates.
    bool is_locked() const;

    // Return true if this bucket was locked.
    bool trylock();

    // The bucket might be invalid, due to a concurrent resize. The lock()
    // method does not respect that and can deadlock if the caller does not
    // hold _resize_lock.
    void lock();

    // Unlocks this bucket.
    void unlock();

    // Installs redirect in this bucket.
    // Prior to doing so you must have successfully locked this bucket.
    void redirect();
  };

  // The backing storage table holding the buckets and its size and mask-bits.
  // The table is always a power of two for two reasons:
  // - Resize can only change the size into half or double
  //   (any power of two would also be possible).
  // - The bucket index is selected by masking the hash.
  class InternalTable : public CHeapObj<F> {
   private:
    Bucket* _buckets;        // Bucket array.
   public:
    const size_t _log2_size; // Size in log2.
    const size_t _size;      // Size, 2^_log2_size.

    // The mask used on the hash for selecting the bucket.
    // The masked value is guaranteed to be inside the buckets array.
    const size_t _hash_mask;
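    // For example (illustrative numbers): with _log2_size == 13 the table has
    // _size == 8192 buckets and _hash_mask == 0x1FFF, so a bucket index is
    // computed as hash & 0x1FFF (see bucket_idx_hash() below).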

    // Create a backing table
    InternalTable(size_t log2_size);
    ~InternalTable();

    Bucket* get_buckets() { return _buckets; }
    Bucket* get_bucket(size_t idx) { return &_buckets[idx]; }
  };

  // Used as the default functor when no functor is supplied for some methods.
  struct NoOp {
    void operator()(VALUE*) {}
    const VALUE& operator()() {}
    void operator()(bool, VALUE*) {}
  } noOp;

  // For materializing a supplied value.
  class LazyValueRetrieve {
   private:
    const VALUE& _val;
   public:
    LazyValueRetrieve(const VALUE& val) : _val(val) {}
    const VALUE& operator()() { return _val; }
  };

  InternalTable* _table;      // Active table.
  InternalTable* _new_table;  // Table we are resizing to.

  // Default sizes
  static const size_t DEFAULT_MAX_SIZE_LOG2 = 21;
  static const size_t DEFAULT_START_SIZE_LOG2 = 13;
  static const size_t DEFAULT_GROW_HINT = 4; // Chain length

  const size_t _log2_size_limit;  // The biggest size.
  const size_t _log2_start_size;  // Start size.
  const size_t _grow_hint;        // Number of linked items

  volatile bool _size_limit_reached;

  // We serialize resizers and other bulk operations which do not support
  // concurrent resize with this lock.
  Mutex* _resize_lock;
  // Since we need to drop the mutex for safepoints, but stop other threads from
  // taking the mutex after a safepoint, this field holds the actual lock state.
  // After acquiring the mutex you must check if this is already locked. If so
  // you must drop the mutex until the real lock holder grabs the mutex.
  volatile Thread* _resize_lock_owner;

  // Return true if locking mutex/state succeeded.
  bool try_resize_lock(Thread* locker);
  // Returns when both mutex and state are properly locked.
  void lock_resize_lock(Thread* locker);
  // Unlocks mutex and state.
  void unlock_resize_lock(Thread* locker);

  // This method sets the _invisible_epoch and does a write_synchronize.
  // Subsequent calls check the state of _invisible_epoch and determine if the
  // write_synchronize can be avoided. If not, it sets the _invisible_epoch
  // again and does a write_synchronize.
  void write_synchonize_on_visible_epoch(Thread* thread);
  // To be able to avoid write_synchronize in resize and other bulk operations,
  // this field keeps track of whether a version of the hash-table has ever
  // been seen. We use the working thread pointer as a tag for debugging. The
  // _invisible_epoch can only be used by the owner of _resize_lock.
  volatile Thread* _invisible_epoch;

  // Scoped critical section, which also handles the invisible epochs.
  // An invisible epoch/version does not need a write_synchronize().
  class ScopedCS: public StackObj {
   protected:
    Thread* _thread;
    ConcurrentHashTable<VALUE, CONFIG, F>* _cht;
   public:
    ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht);
    ~ScopedCS();
  };


  // Max number of deletes in one bucket chain during bulk delete.
  static const size_t BULK_DELETE_LIMIT = 256;

  // Simple getters and setters for the internal table.
  InternalTable* get_table() const;
  InternalTable* get_new_table() const;
  InternalTable* set_table_from_new();

  // Destroys all nodes.
  void free_nodes();

  // Mask away high bits of hash.
  static size_t bucket_idx_hash(InternalTable* table, const uintx hash) {
    return ((size_t)hash) & table->_hash_mask;
  }

  // Returns the bucket for the hash in the given internal table.
  Bucket* get_bucket_in(InternalTable* table, const uintx hash) const {
    size_t bucket_index = bucket_idx_hash(table, hash);
    return table->get_bucket(bucket_index);
  }

  // Returns the correct bucket for reading and handles resizing.
  Bucket* get_bucket(const uintx hash) const;

  // Returns the correct bucket for updates and handles resizing.
  Bucket* get_bucket_locked(Thread* thread, const uintx hash);

  // Finds a node.
  template <typename LOOKUP_FUNC>
  Node* get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
                 bool* have_dead, size_t* loops = NULL) const;

  // Methods for shrinking.
  bool internal_shrink_prolog(Thread* thread, size_t log2_size);
  void internal_shrink_epilog(Thread* thread);
  void internal_shrink_range(Thread* thread, size_t start, size_t stop);
  bool internal_shrink(Thread* thread, size_t size_limit_log2);

  // Methods for growing.
  bool unzip_bucket(Thread* thread, InternalTable* old_table,
                    InternalTable* new_table, size_t even_index,
                    size_t odd_index);
  bool internal_grow_prolog(Thread* thread, size_t log2_size);
  void internal_grow_epilog(Thread* thread);
  void internal_grow_range(Thread* thread, size_t start, size_t stop);
  bool internal_grow(Thread* thread, size_t log2_size);

  // Get a value.
  template <typename LOOKUP_FUNC>
  VALUE* internal_get(Thread* thread, LOOKUP_FUNC& lookup_f,
                      bool* grow_hint = NULL);

  // Insert which handles a number of cases.
  template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
  bool internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                       CALLBACK_FUNC& callback, bool* grow_hint = NULL);

  // Returns true if an item matching LOOKUP_FUNC is removed.
  // Calls DELETE_FUNC before destroying the node.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f,
                       DELETE_FUNC& delete_f);

  // Visits nodes with FUNC.
  template <typename FUNC>
  static bool visit_nodes(Bucket* bucket, FUNC& visitor_f);

  // During shrink/grow we cannot guarantee that we only visit nodes once, with
  // the current algorithm. To keep it simple the caller will have locked
  // _resize_lock.
  template <typename FUNC>
  void do_scan_locked(Thread* thread, FUNC& scan_f);

  // Check for dead items in a bucket.
  template <typename EVALUATE_FUNC>
  size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                            size_t num_del, Node** ndel);

  // Check for dead items in this table. During shrink/grow we cannot guarantee
  // that we only visit nodes once. To keep it simple the caller will have
  // locked _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked(Thread* thread, EVALUATE_FUNC& eval_f
                             , DELETE_FUNC& del_f) {
    do_bulk_delete_locked_for(thread, 0, _table->_size, eval_f, del_f);
  }

  // To have prefetching for a VALUE that is a pointer during
  // do_bulk_delete_locked, we have these helper classes. One for the
  // non-pointer case without prefetch and one for the pointer case with
  // prefetch.
  template <bool b, typename EVALUATE_FUNC>
  struct HaveDeletables {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };
  template<typename EVALUATE_FUNC>
  struct HaveDeletables<true, EVALUATE_FUNC> {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };

  // Check for dead items in this table within a range. During shrink/grow we
  // cannot guarantee that we only visit nodes once. To keep it simple the
  // caller will have locked _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked_for(Thread* thread, size_t start_idx,
                                 size_t stop_idx, EVALUATE_FUNC& eval_f,
                                 DELETE_FUNC& del_f, bool is_mt = false);

  // Method to delete one item.
  template <typename LOOKUP_FUNC>
  void delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f);

 public:
  ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2,
                      size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
                      size_t grow_hint = DEFAULT_GROW_HINT);

  ~ConcurrentHashTable();

  size_t get_size_log2(Thread* thread);
  size_t get_node_size() const { return sizeof(Node); }
  bool is_max_size_reached() { return _size_limit_reached; }

  // This means no paused bucket resize operation is going to resume
  // on this table.
  bool is_safepoint_safe() { return _resize_lock_owner == NULL; }

  // Re-size operations.
  bool shrink(Thread* thread, size_t size_limit_log2 = 0);
  bool grow(Thread* thread, size_t size_limit_log2 = 0);

  // All callbacks for get are under critical sections. Other callbacks may be
  // under a critical section or may have locked parts of the table. Calling any
  // methods on the table during a callback is not supported. Only
  // MultiGetHandle supports multiple gets.

  // LOOKUP_FUNC is used for matching, VALUE_FUNC creates the value to be
  // inserted and CALLBACK_FUNC is called with the new or old value. Returns
  // true if the value already exists.
  template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
  bool get_insert_lazy(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& val_f,
                       CALLBACK_FUNC& callback_f, bool* grow_hint = NULL) {
    return !internal_insert(thread, lookup_f, val_f, callback_f, grow_hint);
  }

  // Same without CALLBACK_FUNC.
  template <typename LOOKUP_FUNC, typename VALUE_FUNC>
  bool get_insert_lazy(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& val_f,
                       bool* grow_hint = NULL) {
    return get_insert_lazy(thread, lookup_f, val_f, noOp, grow_hint);
  }

  // Same without VALUE_FUNC.
  template <typename LOOKUP_FUNC, typename CALLBACK_FUNC>
  bool get_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                  CALLBACK_FUNC& callback_f, bool* grow_hint = NULL) {
    LazyValueRetrieve vp(value);
    return get_insert_lazy(thread, lookup_f, vp, callback_f, grow_hint);
  }

  // Same without CALLBACK_FUNC and VALUE_FUNC.
  template <typename LOOKUP_FUNC>
  bool get_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                  bool* grow_hint = NULL) {
    return get_insert(thread, lookup_f, value, noOp, grow_hint);
  }
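  // An illustrative (hypothetical) call site, assuming a lookup functor as
  // sketched near the top of this file:
  //
  //   MyLookup lookup(key);
  //   bool existed = cht->get_insert(thread, lookup, my_value);
  //   // existed == false means my_value was inserted by this call.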

  // Get methods return true on a found item matching LOOKUP_FUNC, and then
  // FOUND_FUNC is called on it.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& foundf,
           bool* grow_hint = NULL);

  // Return a copy of an item found with LOOKUP_FUNC.
  template <typename LOOKUP_FUNC>
  VALUE get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint = NULL);

  // Returns true if the item was inserted, duplicates are found with
  // LOOKUP_FUNC.
  template <typename LOOKUP_FUNC>
  bool insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
              bool* grow_hint = NULL) {
    LazyValueRetrieve vp(value);
    return internal_insert(thread, lookup_f, vp, noOp, grow_hint);
  }

  // This does a fast unsafe insert and can thus only be used when there is no
  // risk of duplicates and no other thread uses this table.
  bool unsafe_insert(const VALUE& value);

  // Returns true if an item matching LOOKUP_FUNC was deleted;
  // prior to destruction DELETE_FUNC is called.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& del_f) {
    return internal_remove(thread, lookup_f, del_f);
  }

  // Same without DELETE_FUNC.
  template <typename LOOKUP_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f) {
    return internal_remove(thread, lookup_f, noOp);
  }

  // Visits all items with SCAN_FUNC if there is no concurrent resize. Takes
  // the resize lock to avoid concurrent resizes. Otherwise returns false.
  template <typename SCAN_FUNC>
  bool try_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC when the resize lock is obtained.
  template <typename SCAN_FUNC>
  void do_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC without any protection.
  // It will assume there is no other thread accessing this
  // table during the safepoint. Must be called by the VM thread.
  template <typename SCAN_FUNC>
  void do_safepoint_scan(SCAN_FUNC& scan_f);

  // Destroys items matching EVALUATE_FUNC if the resize lock is obtained;
  // before destroying an item DELETE_FUNC is called. Otherwise returns false.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f,
                       DELETE_FUNC& del_f);

  // Destroys items matching EVALUATE_FUNC when the resize lock is successfully
  // obtained; before destroying an item DELETE_FUNC is called.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f);

  // Writes statistics to the outputStream. Item sizes are calculated with
  // VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                     const char* table_name);

  // Moves all nodes from this table to to_cht.
  bool try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht);

  // This is a Curiously Recurring Template Pattern (CRTP) interface for the
  // specialization.
  struct BaseConfig {
   public:
    // Called when the hash table needs the hash for a VALUE.
    static uintx get_hash(const VALUE& value, bool* dead) {
      return CONFIG::get_hash(value, dead);
    }
    // On get_copy, if no value is found then this value is returned.
    static const VALUE& notfound() {
      return CONFIG::notfound();
    }
    // Default node allocation.
    static void* allocate_node(size_t size, const VALUE& value);
    // Default node reclamation.
    static void free_node(void* memory, const VALUE& value);
  };
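  // A hypothetical CONFIG sketch (names are illustrative only): derive from
  // BaseConfig to pick up the default node allocation/reclamation and supply
  // the hash function and the not-found sentinel, e.g.:
  //
  //   class MyConfig : public ConcurrentHashTable<MyValue, MyConfig, mtInternal>::BaseConfig {
  //    public:
  //     static uintx get_hash(const MyValue& value, bool* is_dead) { /* ... */ }
  //     static const MyValue& notfound()                           { /* ... */ }
  //   };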

  // Scoped multi getter.
  class MultiGetHandle : private ScopedCS {
   public:
    MultiGetHandle(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
      : ScopedCS(thread, cht) {}
    // In the MultiGetHandle scope you can look up items matching LOOKUP_FUNC.
    // The VALUEs are safe as long as you never save the VALUEs outside the
    // scope, e.g. after ~MultiGetHandle().
    template <typename LOOKUP_FUNC>
    VALUE* get(LOOKUP_FUNC& lookup_f, bool* grow_hint = NULL);
  };
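  // Illustrative use (hypothetical lookup functors), e.g. to resolve several
  // items under a single critical section:
  //
  //   {
  //     MultiGetHandle mgh(thread, cht);
  //     VALUE* a = mgh.get(lookup_a);
  //     VALUE* b = mgh.get(lookup_b);
  //     // a and b must not be saved beyond this scope.
  //   }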

 private:
  class BucketsOperation;

 public:
  class BulkDeleteTask;
  class GrowTask;
};

#endif // include guard