1 /*
2     Copyright (c) 2005-2020 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
19 
20 #define __TBB_concurrent_hash_map_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22 
23 #include "tbb_stddef.h"
24 #include <iterator>
25 #include <utility>      // Need std::pair
26 #include <cstring>      // Need std::memset
27 #include __TBB_STD_SWAP_HEADER
28 
29 #include "tbb_allocator.h"
30 #include "spin_rw_mutex.h"
31 #include "atomic.h"
32 #include "tbb_exception.h"
33 #include "tbb_profiling.h"
34 #include "aligned_space.h"
35 #include "internal/_tbb_hash_compare_impl.h"
36 #include "internal/_template_helpers.h"
37 #include "internal/_allocator_traits.h"
38 #if __TBB_INITIALIZER_LISTS_PRESENT
39 #include <initializer_list>
40 #endif
41 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
42 #include <typeinfo>
43 #endif
44 #if __TBB_STATISTICS
45 #include <stdio.h>
46 #endif
47 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
48 // On most platforms __TBB_CPP11_RVALUE_REF_PRESENT implies __TBB_CPP11_TUPLE_PRESENT;
49 // the tuple macro is checked explicitly for logical correctness
50 #include <tuple>
51 #endif
52 
53 namespace tbb {
54 
55 namespace interface5 {
56 
57     template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
58     class concurrent_hash_map;
59 
60     //! @cond INTERNAL
61     namespace internal {
62     using namespace tbb::internal;
63 
64 
65     //! Type of a hash code.
66     typedef size_t hashcode_t;
67     //! Node base type
68     struct hash_map_node_base : tbb::internal::no_copy {
69         //! Mutex type
70         typedef spin_rw_mutex mutex_t;
71         //! Scoped lock type for mutex
72         typedef mutex_t::scoped_lock scoped_t;
73         //! Next node in chain
74         hash_map_node_base *next;
75         mutex_t mutex;
76     };
77     //! Incompleteness flag value
78     static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
79     //! Rehashed empty bucket flag
80     static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
81     //! base class of concurrent_hash_map
82     class hash_map_base {
83     public:
84         //! Size type
85         typedef size_t size_type;
86         //! Type of a hash code.
87         typedef size_t hashcode_t;
88         //! Segment index type
89         typedef size_t segment_index_t;
90         //! Node base type
91         typedef hash_map_node_base node_base;
92         //! Bucket type
93         struct bucket : tbb::internal::no_copy {
94             //! Mutex type for buckets
95             typedef spin_rw_mutex mutex_t;
96             //! Scoped lock type for mutex
97             typedef mutex_t::scoped_lock scoped_t;
98             mutex_t mutex;
99             node_base *node_list;
100         };
101         //! Count of embedded segments (those allocated inside the table object)
102         static size_type const embedded_block = 1;
103         //! Count of buckets in the embedded segments
104         static size_type const embedded_buckets = 1<<embedded_block;
105         //! Count of segments in the first block
106         static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
107         //! Size of a pointer / table size
108         static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
109         //! Segment pointer
110         typedef bucket *segment_ptr_t;
111         //! Segment pointers table type
112         typedef segment_ptr_t segments_table_t[pointers_per_table];
113         //! Hash mask = sum of allocated segment sizes - 1
114         atomic<hashcode_t> my_mask;
115         //! Segment pointers table. Also prevents false sharing between my_mask and my_size
116         segments_table_t my_table;
117         //! Size of container in stored items
118         atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
119         //! Zero segment
120         bucket my_embedded_segment[embedded_buckets];
121 #if __TBB_STATISTICS
122         atomic<unsigned> my_info_resizes; // concurrent ones
123         mutable atomic<unsigned> my_info_restarts; // race collisions
124         atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket
125 #endif
126         //! Constructor
127         hash_map_base() {
128             std::memset(my_table, 0, sizeof(my_table));
129             my_mask = 0;
130             my_size = 0;
131             std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
132             for( size_type i = 0; i < embedded_block; i++ ) // fill the table
133                 my_table[i] = my_embedded_segment + segment_base(i);
134             my_mask = embedded_buckets - 1;
135             __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
136 #if __TBB_STATISTICS
137             my_info_resizes = 0; // concurrent ones
138             my_info_restarts = 0; // race collisions
139             my_info_rehashes = 0;  // invocations of rehash_bucket
140 #endif
141         }
142 
143         //! @return segment index of given index in the array
144         static segment_index_t segment_index_of( size_type index ) {
145             return segment_index_t( __TBB_Log2( index|1 ) );
146         }
147 
148         //! @return the first array index of given segment
149         static segment_index_t segment_base( segment_index_t k ) {
150             return (segment_index_t(1)<<k & ~segment_index_t(1));
151         }
152 
153         //! @return segment size except for @arg k == 0
154         static size_type segment_size( segment_index_t k ) {
155             return size_type(1)<<k; // fake value for k==0
156         }
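        // Worked example of the segment layout implied by the three helpers above
        // (illustrative comment, not part of the original header):
        //   segment 0 -> bucket indices [0, 2)   (the embedded buckets)
        //   segment 1 -> bucket indices [2, 4)
        //   segment k -> bucket indices [2^k, 2^(k+1)) for k >= 1
        // e.g. segment_index_of(5) == 2, segment_base(2) == 4 and segment_size(2) == 4,
        // so bucket 5 is stored at offset 1 of segment 2.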
157 
158         //! @return true if @arg ptr is a valid pointer
159         static bool is_valid( void *ptr ) {
160             return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
161         }
162 
163         //! Initialize buckets
164         static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
165             if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166             else for(size_type i = 0; i < sz; i++, ptr++) {
167                 *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
168                 ptr->node_list = rehash_req;
169             }
170         }
171 
172         //! Add node @arg n to bucket @arg b
173         static void add_to_bucket( bucket *b, node_base *n ) {
174             __TBB_ASSERT(b->node_list != rehash_req, NULL);
175             n->next = b->node_list;
176             b->node_list = n; // it's under lock and the flag is set
177         }
178 
179         //! Exception safety helper
180         struct enable_segment_failsafe : tbb::internal::no_copy {
181             segment_ptr_t *my_segment_ptr;
182             enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
183             ~enable_segment_failsafe() {
184                 if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
185             }
186         };
187 
188         //! Enable segment
189         template<typename Allocator>
190         void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
191             typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
192             typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
193             bucket_allocator_type bucket_allocator(allocator);
194             __TBB_ASSERT( k, "Zero segment must be embedded" );
195             enable_segment_failsafe watchdog( my_table, k );
196             size_type sz;
197             __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198             if( k >= first_block ) {
199                 sz = segment_size( k );
200                 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201                 init_buckets( ptr, sz, is_initial );
202                 itt_hide_store_word( my_table[k], ptr );
203                 sz <<= 1;// double it to get entire capacity of the container
204             } else { // the first block
205                 __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
206                 sz = segment_size( first_block );
207                 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208                 init_buckets( ptr, sz - embedded_buckets, is_initial );
209                 ptr -= segment_base(embedded_block);
210                 for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
211                     itt_hide_store_word( my_table[i], ptr + segment_base(i) );
212             }
213             itt_store_word_with_release( my_mask, sz-1 );
214             watchdog.my_segment_ptr = 0;
215         }
216 
217         template<typename Allocator>
218         void delete_segment(segment_index_t s, const Allocator& allocator) {
219             typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
220             typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
221             bucket_allocator_type bucket_allocator(allocator);
222             segment_ptr_t buckets_ptr = my_table[s];
223             size_type sz = segment_size( s ? s : 1 );
224 
225             if( s >= first_block) // the first segment or the next
226                 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227             else if( s == embedded_block && embedded_block != first_block )
228                 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
229                                                     segment_size(first_block) - embedded_buckets);
230             if( s >= embedded_block ) my_table[s] = 0;
231         }
232 
233         //! Get bucket by (masked) hashcode
234         bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
235             segment_index_t s = segment_index_of( h );
236             h -= segment_base(s);
237             segment_ptr_t seg = my_table[s];
238             __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
239             return &seg[h];
240         }
241 
242         // internal serial rehashing helper
243         void mark_rehashed_levels( hashcode_t h ) throw () {
244             segment_index_t s = segment_index_of( h );
245             while( segment_ptr_t seg = my_table[++s] )
246                 if( seg[h].node_list == rehash_req ) {
247                     seg[h].node_list = empty_rehashed;
248                     mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
249                 }
250         }
251 
252         //! Check for mask race
253         // Splitting into two functions should help inlining
254         inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
255             hashcode_t m_now, m_old = m;
256             m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
257             if( m_old != m_now )
258                 return check_rehashing_collision( h, m_old, m = m_now );
259             return false;
260         }
261 
262         //! Process mask race, check for rehashing collision
263         bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
264             __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
265             if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
266                 // condition above proves that 'h' has some other bits set beside 'm_old'
267                 // find next applicable mask after m_old    //TODO: look at bsl instruction
268                 for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
269                     ;
270                 m_old = (m_old<<1) - 1; // get full mask from a bit
271                 __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
272                 // check whether it is rehashing/ed
273                 if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
274                 {
275 #if __TBB_STATISTICS
276                     my_info_restarts++; // race collisions
277 #endif
278                     return true;
279                 }
280             }
281             return false;
282         }
283 
284         //! Insert a node and check for load factor. @return segment index to enable.
285         segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
286             size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
287             add_to_bucket( b, n );
288             // check load factor
289             if( sz >= mask ) { // TODO: add custom load_factor
290                 segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
291                 __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
292                 static const segment_ptr_t is_allocating = (segment_ptr_t)2;
293                 if( !itt_hide_load_word(my_table[new_seg])
294                   && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
295                     return new_seg; // The value must be processed
296             }
297             return 0;
298         }
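        // Growth policy sketch (illustrative): the table grows once the item count reaches the
        // bucket count. For example, with my_mask == 255 (segments 0..7 allocated, 256 buckets),
        // the insertion that raises my_size to 255 returns new_seg == __TBB_Log2(256) == 8; the
        // caller then invokes enable_segment(8), which allocates 256 more buckets and publishes
        // the new mask 511.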
299 
300         //! Prepare enough segments for number of buckets
301         template<typename Allocator>
302         void reserve(size_type buckets, const Allocator& allocator) {
303             if( !buckets-- ) return;
304             bool is_initial = !my_size;
305             for( size_type m = my_mask; buckets > m; m = my_mask )
306                 enable_segment( segment_index_of( m+1 ), allocator, is_initial );
307         }
308         //! Swap hash_map_bases
309         void internal_swap(hash_map_base &table) {
310             using std::swap;
311             swap(this->my_mask, table.my_mask);
312             swap(this->my_size, table.my_size);
313             for(size_type i = 0; i < embedded_buckets; i++)
314                 swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
315             for(size_type i = embedded_block; i < pointers_per_table; i++)
316                 swap(this->my_table[i], table.my_table[i]);
317         }
318 
319 #if __TBB_CPP11_RVALUE_REF_PRESENT
320         void internal_move(hash_map_base&& other) {
321             my_mask = other.my_mask;
322             other.my_mask = embedded_buckets - 1;
323             my_size = other.my_size;
324             other.my_size = 0;
325 
326             for(size_type i = 0; i < embedded_buckets; ++i) {
327                 my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
328                 other.my_embedded_segment[i].node_list = NULL;
329             }
330 
331             for(size_type i = embedded_block; i < pointers_per_table; ++i) {
332                 my_table[i] = other.my_table[i];
333                 other.my_table[i] = NULL;
334             }
335         }
336 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
337     };
338 
339     template<typename Iterator>
340     class hash_map_range;
341 
342     //! Meets requirements of a forward iterator for STL
343     /** Value is either the T or const T type of the container.
344         @ingroup containers */
345     template<typename Container, typename Value>
346     class hash_map_iterator
347         : public std::iterator<std::forward_iterator_tag,Value>
348     {
349         typedef Container map_type;
350         typedef typename Container::node node;
351         typedef hash_map_base::node_base node_base;
352         typedef hash_map_base::bucket bucket;
353 
354         template<typename C, typename T, typename U>
355         friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
356 
357         template<typename C, typename T, typename U>
358         friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
359 
360         template<typename C, typename T, typename U>
361         friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
362 
363         template<typename C, typename U>
364         friend class hash_map_iterator;
365 
366         template<typename I>
367         friend class hash_map_range;
368 
369         void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
370             size_t k = my_index+1;
371             __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
372             while( k <= my_map->my_mask ) {
373                 // Following test uses 2's-complement wizardry: k&(k-2) is zero only for k==0 or a power of two >= 2, i.e. exactly at the first bucket index of a segment
374                 if( k&(k-2) ) // not the beginning of a segment
375                     ++my_bucket;
376                 else my_bucket = my_map->get_bucket( k );
377                 my_node = static_cast<node*>( my_bucket->node_list );
378                 if( hash_map_base::is_valid(my_node) ) {
379                     my_index = k; return;
380                 }
381                 ++k;
382             }
383             my_bucket = 0; my_node = 0; my_index = k; // the end
384         }
385 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
386         template<typename Key, typename T, typename HashCompare, typename A>
387         friend class interface5::concurrent_hash_map;
388 #else
389     public: // workaround
390 #endif
391         //! concurrent_hash_map over which we are iterating.
392         const Container *my_map;
393 
394         //! Index in hash table for current item
395         size_t my_index;
396 
397         //! Pointer to bucket
398         const bucket *my_bucket;
399 
400         //! Pointer to node that has current item
401         node *my_node;
402 
403         hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
404 
405     public:
406         //! Construct undefined iterator
407         hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
408         hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
409             my_map(other.my_map),
410             my_index(other.my_index),
411             my_bucket(other.my_bucket),
412             my_node(other.my_node)
413         {}
414 
415         hash_map_iterator& operator=( const hash_map_iterator<Container,typename Container::value_type> &other ) {
416             my_map = other.my_map;
417             my_index = other.my_index;
418             my_bucket = other.my_bucket;
419             my_node = other.my_node;
420             return *this;
421         }
422         Value& operator*() const {
423             __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
424             return my_node->value();
425         }
426         Value* operator->() const {return &operator*();}
427         hash_map_iterator& operator++();
428 
429         //! Post increment
430         hash_map_iterator operator++(int) {
431             hash_map_iterator old(*this);
432             operator++();
433             return old;
434         }
435     };
436 
437     template<typename Container, typename Value>
438     hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
439         my_map(&map),
440         my_index(index),
441         my_bucket(b),
442         my_node( static_cast<node*>(n) )
443     {
444         if( b && !hash_map_base::is_valid(n) )
445             advance_to_next_bucket();
446     }
447 
448     template<typename Container, typename Value>
449     hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
450         my_node = static_cast<node*>( my_node->next );
451         if( !my_node ) advance_to_next_bucket();
452         return *this;
453     }
454 
455     template<typename Container, typename T, typename U>
456     bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
457         return i.my_node == j.my_node && i.my_map == j.my_map;
458     }
459 
460     template<typename Container, typename T, typename U>
461     bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
462         return i.my_node != j.my_node || i.my_map != j.my_map;
463     }
464 
465     //! Range class used with concurrent_hash_map
466     /** @ingroup containers */
467     template<typename Iterator>
468     class hash_map_range {
469         typedef typename Iterator::map_type map_type;
470         Iterator my_begin;
471         Iterator my_end;
472         mutable Iterator my_midpoint;
473         size_t my_grainsize;
474         //! Set my_midpoint to point approximately half way between my_begin and my_end.
475         void set_midpoint() const;
476         template<typename U> friend class hash_map_range;
477     public:
478         //! Type for size of a range
479         typedef std::size_t size_type;
480         typedef typename Iterator::value_type value_type;
481         typedef typename Iterator::reference reference;
482         typedef typename Iterator::difference_type difference_type;
483         typedef Iterator iterator;
484 
485         //! True if range is empty.
486         bool empty() const {return my_begin==my_end;}
487 
488         //! True if range can be partitioned into two subranges.
489         bool is_divisible() const {
490             return my_midpoint!=my_end;
491         }
492         //! Split range.
493         hash_map_range( hash_map_range& r, split ) :
494             my_end(r.my_end),
495             my_grainsize(r.my_grainsize)
496         {
497             r.my_end = my_begin = r.my_midpoint;
498             __TBB_ASSERT( !empty(), "Splitting even though the range is not divisible" );
499             __TBB_ASSERT( !r.empty(), "Splitting even though the range is not divisible" );
500             set_midpoint();
501             r.set_midpoint();
502         }
503         //! type conversion
504         template<typename U>
505         hash_map_range( hash_map_range<U>& r) :
506             my_begin(r.my_begin),
507             my_end(r.my_end),
508             my_midpoint(r.my_midpoint),
509             my_grainsize(r.my_grainsize)
510         {}
511         //! Init range with container and grainsize specified
512         hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
513             my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
514             my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
515             my_grainsize( grainsize_ )
516         {
517             __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
518             set_midpoint();
519         }
520         const Iterator& begin() const {return my_begin;}
521         const Iterator& end() const {return my_end;}
522         //! The grain size for this range.
523         size_type grainsize() const {return my_grainsize;}
524     };
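    // Illustrative sketch (not part of the original header): hash_map_range models the TBB Range
    // concept, so a table can be traversed in parallel with tbb::parallel_for (declared in
    // "parallel_for.h", which is not included here). Such traversal should not run concurrently
    // with erase operations. The table type, its contents and the 'process' function are assumed:
    //
    //     typedef tbb::concurrent_hash_map<int,int> table_t;
    //     table_t table;
    //     // ... fill the table ...
    //     tbb::parallel_for( table.range(), []( const table_t::range_type &r ) {
    //         for( table_t::range_type::iterator i = r.begin(); i != r.end(); ++i )
    //             process( i->first, i->second );
    //     } );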
525 
526     template<typename Iterator>
527     void hash_map_range<Iterator>::set_midpoint() const {
528         // Split by groups of nodes
529         size_t m = my_end.my_index-my_begin.my_index;
530         if( m > my_grainsize ) {
531             m = my_begin.my_index + m/2u;
532             hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
533             my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
534         } else {
535             my_midpoint = my_end;
536         }
537         __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
538             "my_begin is after my_midpoint" );
539         __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
540             "my_midpoint is after my_end" );
541         __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
542             "[my_begin, my_midpoint) range should not be empty" );
543     }
544 
545     } // internal
546 //! @endcond
547 
548 #if _MSC_VER && !defined(__INTEL_COMPILER)
549     // Suppress "conditional expression is constant" warning.
550     #pragma warning( push )
551     #pragma warning( disable: 4127 )
552 #endif
553 
554 //! Unordered map from Key to T.
555 /** concurrent_hash_map is an associative container with concurrent access.
556 
557 @par Compatibility
558     The class meets all Container Requirements from the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1).
559 
560 @par Exception Safety
561     - The hash function is not permitted to throw an exception. The user-defined types Key and T must not throw exceptions from their destructors.
562     - If an exception occurs during an insert() operation, the operation has no effect (unless the exception is raised by the HashCompare::hash() function during segment growth).
563     - If an exception occurs during operator=(), the container may hold only a part of the source items, and the methods size() and empty() may return incorrect results.
564 
565 @par Changes since TBB 2.1
566     - Replaced internal algorithm and data structure. Patent is pending.
567     - Added a bucket count argument to the constructor
568 
569 @par Changes since TBB 2.0
570     - Fixed exception-safety
571     - Added template argument for allocator
572     - Added allocator argument in constructors
573     - Added constructor from a range of iterators
574     - Added several new overloaded insert() methods
575     - Added get_allocator()
576     - Added swap()
577     - Added count()
578     - Added overloaded erase(accessor &) and erase(const_accessor&)
579     - Added equal_range() [const]
580     - Added [const_]pointer, [const_]reference, and allocator_type types
581     - Added global functions: operator==(), operator!=(), and swap()
582 
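@par Example (illustrative)
    A minimal usage sketch, not part of the original documentation; it assumes the default
    tbb_hash_compare works for the key type, and the typedef and keys are only for illustration.
    @code
    typedef tbb::concurrent_hash_map<std::string,int> table_t;
    table_t counters;
    {
        table_t::accessor a;                        // write lock is held while 'a' refers to an element
        counters.insert( a, std::string("apple") ); // default-constructs the value if the key is new
        a->second += 1;
    }                                               // lock released when 'a' goes out of scope
    {
        table_t::const_accessor ca;                 // read lock
        if( counters.find( ca, std::string("apple") ) ) {
            // ca->second may be read here under the read lock
        }
    }
    @endcode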
583     @ingroup containers */
584 template<typename Key, typename T, typename HashCompare, typename Allocator>
585 class concurrent_hash_map : protected internal::hash_map_base {
586     template<typename Container, typename Value>
587     friend class internal::hash_map_iterator;
588 
589     template<typename I>
590     friend class internal::hash_map_range;
591 
592 public:
593     typedef Key key_type;
594     typedef T mapped_type;
595     typedef std::pair<const Key,T> value_type;
596     typedef hash_map_base::size_type size_type;
597     typedef ptrdiff_t difference_type;
598     typedef value_type *pointer;
599     typedef const value_type *const_pointer;
600     typedef value_type &reference;
601     typedef const value_type &const_reference;
602     typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
603     typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
604     typedef internal::hash_map_range<iterator> range_type;
605     typedef internal::hash_map_range<const_iterator> const_range_type;
606     typedef Allocator allocator_type;
607 
608 protected:
609     friend class const_accessor;
610     class node;
611     typedef typename tbb::internal::allocator_rebind<Allocator, node>::type node_allocator_type;
612     typedef tbb::internal::allocator_traits<node_allocator_type> node_allocator_traits;
613     node_allocator_type my_allocator;
614     HashCompare my_hash_compare;
615 
616     class node : public node_base {
617         tbb::aligned_space<value_type> my_value;
618     public:
619         value_type* storage() { return my_value.begin(); }
620         value_type& value() { return *storage(); }
621     };
622 
623     void delete_node( node_base *n ) {
624         node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
625         node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
626         node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
627     }
628 
629     struct node_scoped_guard : tbb::internal::no_copy {
630         node* my_node;
631         node_allocator_type& my_alloc;
632 
633         node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
634         ~node_scoped_guard() {
635             if(my_node) {
636                 node_allocator_traits::destroy(my_alloc, my_node);
637                 node_allocator_traits::deallocate(my_alloc, my_node, 1);
638             }
639         }
640         void dismiss() { my_node = NULL; }
641     };
642 
643 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
644     template<typename... Args>
645     static node* create_node(node_allocator_type& allocator, Args&&... args)
646 #else
647     template<typename Arg1, typename Arg2>
648     static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
649 #endif
650     {
651         node* node_ptr = node_allocator_traits::allocate(allocator, 1);
652         node_scoped_guard guard(node_ptr, allocator);
653         node_allocator_traits::construct(allocator, node_ptr);
654 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
655         node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
656 #else
657         node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
658 #endif
659         guard.dismiss();
660         return node_ptr;
661     }
662 
663     static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
664         return create_node(allocator, key, *t);
665     }
666 
667 #if __TBB_CPP11_RVALUE_REF_PRESENT
668     static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
669         return create_node(allocator, key, std::move(*const_cast<T*>(t)));
670     }
671 #endif
672 
673     static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
674 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
675         // Emplace construct an empty T object inside the pair
676         return create_node(allocator, std::piecewise_construct,
677                            std::forward_as_tuple(key), std::forward_as_tuple());
678 #else
679         // Use of a temporary object is impossible, because create_node takes a non-const reference.
680         // copy-initialization is possible because T is already required to be CopyConstructible.
681         T obj = T();
682         return create_node(allocator, key, tbb::internal::move(obj));
683 #endif
684     }
685 
686     static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
687         __TBB_ASSERT(false,"this dummy function should not be called");
688         return NULL;
689     }
690 
691     node *search_bucket( const key_type &key, bucket *b ) const {
692         node *n = static_cast<node*>( b->node_list );
693         while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
694             n = static_cast<node*>( n->next );
695         __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
696         return n;
697     }
698 
699     //! Bucket accessor: finds a bucket (rehashing it if required), acquires its lock, and provides access to it
700     class bucket_accessor : public bucket::scoped_t {
701         bucket *my_b;
702     public:
703         bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
704         //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
705         inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
706             my_b = base->get_bucket( h );
707             // TODO: actually, notification is unnecessary here, just hiding double-check
708             if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
709                 && try_acquire( my_b->mutex, /*write=*/true ) )
710             {
711                 if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
712             }
713             else bucket::scoped_t::acquire( my_b->mutex, writer );
714             __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
715         }
716         //! check whether bucket is locked for write
717         bool is_writer() { return bucket::scoped_t::is_writer; }
718         //! get bucket pointer
719         bucket *operator() () { return my_b; }
720     };
721 
722     // TODO refactor to hash_base
723     void rehash_bucket( bucket *b_new, const hashcode_t h ) {
724         __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
725         __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
726         __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
727         hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
728 #if __TBB_STATISTICS
729         my_info_rehashes++; // invocations of rehash_bucket
730 #endif
731 
732         bucket_accessor b_old( this, h & mask );
733 
734         mask = (mask<<1) | 1; // get full mask for new bucket
735         __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
736     restart:
737         for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
738             hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
739 #if TBB_USE_ASSERT
740             hashcode_t bmask = h & (mask>>1);
741             bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
742             __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
743 #endif
744             if( (c & mask) == h ) {
745                 if( !b_old.is_writer() )
746                     if( !b_old.upgrade_to_writer() ) {
747                         goto restart; // node ptr can be invalid due to concurrent erase
748                     }
749                 *p = n->next; // exclude from b_old
750                 add_to_bucket( b_new, n );
751             } else p = &n->next; // iterate to next item
752         }
753     }
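    // Lazy rehashing example (illustrative): suppose the mask has just grown from 3 to 7, so
    // bucket 5 is still marked rehash_req. Its first access triggers rehash_bucket(), which
    // computes the parent mask (1u<<__TBB_Log2(5))-1 == 3, locks the parent bucket 5 & 3 == 1,
    // and moves every node of bucket 1 whose hash satisfies (hash & 7) == 5 into bucket 5.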
754 
755     struct call_clear_on_leave {
756         concurrent_hash_map* my_ch_map;
757         call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
758         void dismiss() {my_ch_map = 0;}
759         ~call_clear_on_leave(){
760             if (my_ch_map){
761                 my_ch_map->clear();
762             }
763         }
764     };
765 public:
766 
767     class accessor;
768     //! Combines data access, locking, and garbage collection.
769     class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
770         friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
771         friend class accessor;
772     public:
773         //! Type of value
774         typedef const typename concurrent_hash_map::value_type value_type;
775 
776         //! True if result is empty.
777         bool empty() const { return !my_node; }
778 
779         //! Set to null
780         void release() {
781             if( my_node ) {
782                 node::scoped_t::release();
783                 my_node = 0;
784             }
785         }
786 
787         //! Return reference to associated value in hash table.
788         const_reference operator*() const {
789             __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
790             return my_node->value();
791         }
792 
793         //! Return pointer to associated value in hash table.
794         const_pointer operator->() const {
795             return &operator*();
796         }
797 
798         //! Create empty result
799         const_accessor() : my_node(NULL) {}
800 
801         //! Destroy result after releasing the underlying reference.
802         ~const_accessor() {
803             my_node = NULL; // scoped lock's release() is called in its destructor
804         }
805     protected:
806         bool is_writer() { return node::scoped_t::is_writer; }
807         node *my_node;
808         hashcode_t my_hash;
809     };
810 
811     //! Allows write access to elements and combines data access, locking, and garbage collection.
812     class accessor: public const_accessor {
813     public:
814         //! Type of value
815         typedef typename concurrent_hash_map::value_type value_type;
816 
817         //! Return reference to associated value in hash table.
818         reference operator*() const {
819             __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
820             return this->my_node->value();
821         }
822 
823         //! Return pointer to associated value in hash table.
824         pointer operator->() const {
825             return &operator*();
826         }
827     };
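    // Accessor lifetime sketch (illustrative): the per-element lock taken by find()/insert() is
    // held for as long as the accessor refers to the element, so keep accessor scopes short or
    // call release() explicitly. 'table' below is a user-provided concurrent_hash_map<int,int>:
    //
    //     concurrent_hash_map<int,int>::accessor a;
    //     if( table.insert( a, 42 ) )   // write lock on the element; true if the key was absent
    //         a->second = 1;
    //     a.release();                  // unlock before doing unrelated long-running work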
828 
829     //! Construct empty table.
830     explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
831         : internal::hash_map_base(), my_allocator(a)
832     {}
833 
834     explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
835         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
836     {}
837 
838     //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
839     concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
840         : internal::hash_map_base(), my_allocator(a)
841     {
842         reserve( n, my_allocator );
843     }
844 
845     concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
846         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
847     {
848         reserve( n, my_allocator );
849     }
850 
851     //! Copy constructor
852     concurrent_hash_map( const concurrent_hash_map &table )
853         : internal::hash_map_base(),
854           my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
855     {
856         call_clear_on_leave scope_guard(this);
857         internal_copy(table);
858         scope_guard.dismiss();
859     }
860 
861     concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
862         : internal::hash_map_base(), my_allocator(a)
863     {
864         call_clear_on_leave scope_guard(this);
865         internal_copy(table);
866         scope_guard.dismiss();
867     }
868 
869 #if __TBB_CPP11_RVALUE_REF_PRESENT
870     //! Move constructor
871     concurrent_hash_map( concurrent_hash_map &&table )
872         : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
873     {
874         internal_move(std::move(table));
875     }
876 
877     //! Move constructor
878     concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
879         : internal::hash_map_base(), my_allocator(a)
880     {
881         if (a == table.get_allocator()){
882             internal_move(std::move(table));
883         }else{
884             call_clear_on_leave scope_guard(this);
885             internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
886             scope_guard.dismiss();
887         }
888     }
889 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
890 
891     //! Construct the table by copying the iteration range [first, last), with a given allocator instance
892     template<typename I>
893     concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
894         : internal::hash_map_base(), my_allocator(a)
895     {
896         call_clear_on_leave scope_guard(this);
897         internal_copy(first, last, std::distance(first, last));
898         scope_guard.dismiss();
899     }
900 
901     template<typename I>
902     concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
903         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
904     {
905         call_clear_on_leave scope_guard(this);
906         internal_copy(first, last, std::distance(first, last));
907         scope_guard.dismiss();
908     }
909 
910 #if __TBB_INITIALIZER_LISTS_PRESENT
911     //! Construct the table from the contents of an initializer list
912     concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
913         : internal::hash_map_base(), my_allocator(a)
914     {
915         call_clear_on_leave scope_guard(this);
916         internal_copy(il.begin(), il.end(), il.size());
917         scope_guard.dismiss();
918     }
919 
920     concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
921         : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
922     {
923         call_clear_on_leave scope_guard(this);
924         internal_copy(il.begin(), il.end(), il.size());
925         scope_guard.dismiss();
926     }
927 
928 #endif //__TBB_INITIALIZER_LISTS_PRESENT
929 
930     //! Assignment
931     concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
932         if( this!=&table ) {
933             typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
934             clear();
935             tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
936             internal_copy(table);
937         }
938         return *this;
939     }
940 
941 #if __TBB_CPP11_RVALUE_REF_PRESENT
942     //! Move Assignment
943     concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
944         if(this != &table) {
945             typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
946             internal_move_assign(std::move(table), pocma_type());
947         }
948         return *this;
949     }
950 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
951 
952 #if __TBB_INITIALIZER_LISTS_PRESENT
953     //! Assignment
954     concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
955         clear();
956         internal_copy(il.begin(), il.end(), il.size());
957         return *this;
958     }
959 #endif //__TBB_INITIALIZER_LISTS_PRESENT
960 
961 
962     //! Rehashes and optionally resizes the whole table.
963     /** Useful to optimize performance before or after concurrent operations.
964         Also enables the use of the concurrent find() and count() methods in a serial context. */
965     void rehash(size_type n = 0);
966 
967     //! Clear table
968     void clear();
969 
970     //! Clear table and destroy it.
971     ~concurrent_hash_map() { clear(); }
972 
973     //------------------------------------------------------------------------
974     // Parallel algorithm support
975     //------------------------------------------------------------------------
976     range_type range( size_type grainsize=1 ) {
977         return range_type( *this, grainsize );
978     }
979     const_range_type range( size_type grainsize=1 ) const {
980         return const_range_type( *this, grainsize );
981     }
982 
983     //------------------------------------------------------------------------
984     // STL support - not thread-safe methods
985     //------------------------------------------------------------------------
986     iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
987     iterator end() { return iterator( *this, 0, 0, 0 ); }
988     const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
989     const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
990     std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
991     std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
992 
993     //! Number of items in table.
994     size_type size() const { return my_size; }
995 
996     //! True if size()==0.
997     bool empty() const { return my_size == 0; }
998 
999     //! Upper bound on size.
1000     size_type max_size() const {return (~size_type(0))/sizeof(node);}
1001 
1002     //! Returns the current number of buckets
1003     size_type bucket_count() const { return my_mask+1; }
1004 
1005     //! return allocator object
1006     allocator_type get_allocator() const { return this->my_allocator; }
1007 
1008     //! swap two instances. Iterators are invalidated
1009     void swap( concurrent_hash_map &table );
1010 
1011     //------------------------------------------------------------------------
1012     // concurrent map operations
1013     //------------------------------------------------------------------------
1014 
1015     //! Return count of items (0 or 1)
1016     size_type count( const Key &key ) const {
1017         return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1018     }
1019 
1020     //! Find item and acquire a read lock on the item.
1021     /** Return true if item is found, false otherwise. */
1022     bool find( const_accessor &result, const Key &key ) const {
1023         result.release();
1024         return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1025     }
1026 
1027     //! Find item and acquire a write lock on the item.
1028     /** Return true if item is found, false otherwise. */
1029     bool find( accessor &result, const Key &key ) {
1030         result.release();
1031         return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1032     }
1033 
1034     //! Insert item (if not already present) and acquire a read lock on the item.
1035     /** Returns true if item is new. */
1036     bool insert( const_accessor &result, const Key &key ) {
1037         result.release();
1038         return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1039     }
1040 
1041     //! Insert item (if not already present) and acquire a write lock on the item.
1042     /** Returns true if item is new. */
1043     bool insert( accessor &result, const Key &key ) {
1044         result.release();
1045         return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1046     }
1047 
1048     //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
1049     /** Returns true if item is new. */
1050     bool insert( const_accessor &result, const value_type &value ) {
1051         result.release();
1052         return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1053     }
1054 
1055     //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
1056     /** Returns true if item is new. */
1057     bool insert( accessor &result, const value_type &value ) {
1058         result.release();
1059         return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1060     }
1061 
1062     //! Insert item by copying if there is no such key present already
1063     /** Returns true if item is inserted. */
1064     bool insert( const value_type &value ) {
1065         return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1066     }
1067 
1068 #if __TBB_CPP11_RVALUE_REF_PRESENT
1069     //! Insert item by moving if there is no such key present already and acquire a read lock on the item.
1070     /** Returns true if item is new. */
1071     bool insert( const_accessor &result, value_type && value ) {
1072         return generic_move_insert(result, std::move(value));
1073     }
1074 
1075     //! Insert item by moving if there is no such key present already and acquire a write lock on the item.
1076     /** Returns true if item is new. */
1077     bool insert( accessor &result, value_type && value ) {
1078         return generic_move_insert(result, std::move(value));
1079     }
1080 
1081     //! Insert item by moving if there is no such key present already
1082     /** Returns true if item is inserted. */
1083     bool insert( value_type && value ) {
1084         return generic_move_insert(accessor_not_used(), std::move(value));
1085     }
1086 
1087 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1088     //! Construct item in place if there is no such key present already, and acquire a read lock on the item.
1089     /** Returns true if item is new. */
1090     template<typename... Args>
1091     bool emplace( const_accessor &result, Args&&... args ) {
1092         return generic_emplace(result, std::forward<Args>(args)...);
1093     }
1094 
1095     //! Construct item in place if there is no such key present already, and acquire a write lock on the item.
1096     /** Returns true if item is new. */
1097     template<typename... Args>
1098     bool emplace( accessor &result, Args&&... args ) {
1099         return generic_emplace(result, std::forward<Args>(args)...);
1100     }
1101 
1102     //! Construct item in place if there is no such key present already
1103     /** Returns true if item is inserted. */
1104     template<typename... Args>
1105     bool emplace( Args&&... args ) {
1106         return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1107     }
1108 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1109 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1110 
1111     //! Insert range [first, last)
1112     template<typename I>
1113     void insert( I first, I last ) {
1114         for ( ; first != last; ++first )
1115             insert( *first );
1116     }
1117 
1118 #if __TBB_INITIALIZER_LISTS_PRESENT
1119     //! Insert initializer list
1120     void insert( std::initializer_list<value_type> il ) {
1121         insert( il.begin(), il.end() );
1122     }
1123 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1124 
1125     //! Erase item.
1126     /** Return true if the item was erased by this particular call. */
1127     bool erase( const Key& key );
1128 
1129     //! Erase item by const_accessor.
1130     /** Return true if the item was erased by this particular call. */
1131     bool erase( const_accessor& item_accessor ) {
1132         return exclude( item_accessor );
1133     }
1134 
1135     //! Erase item by accessor.
1136     /** Return true if the item was erased by this particular call. */
1137     bool erase( accessor& item_accessor ) {
1138         return exclude( item_accessor );
1139     }
1140 
1141 protected:
1142     //! Insert or find item and optionally acquire a lock on the item.
1143     bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write,  node* (*allocate_node)(node_allocator_type& ,  const Key &, const T * ), node *tmp_n = 0  ) ;
1144 
1145     struct accessor_not_used { void release(){}};
1146     friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1147     friend const_accessor* accessor_location( const_accessor & a )      { return &a;}
1148 
1149     friend bool is_write_access_needed( accessor const& )           { return true;}
1150     friend bool is_write_access_needed( const_accessor const& )     { return false;}
1151     friend bool is_write_access_needed( accessor_not_used const& )  { return false;}
1152 
1153 #if __TBB_CPP11_RVALUE_REF_PRESENT
1154     template<typename Accessor>
1155     bool generic_move_insert( Accessor && result, value_type && value ) {
1156         result.release();
1157         return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1158     }
1159 
1160 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1161     template<typename Accessor, typename... Args>
1162     bool generic_emplace( Accessor && result, Args &&... args ) {
1163         result.release();
1164         node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1165         return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1166     }
1167 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1168 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1169 
1170     //! delete item by accessor
1171     bool exclude( const_accessor &item_accessor );
1172 
1173     //! Returns a pair of iterators delimiting the item with the given key, or the empty range (end, end) if the key is absent
1174     template<typename I>
1175     std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1176 
1177     //! Copy "source" to *this, where *this must start out empty.
1178     void internal_copy( const concurrent_hash_map& source );
1179 
1180     template<typename I>
1181     void internal_copy( I first, I last, size_type reserve_size );
1182 
1183 #if __TBB_CPP11_RVALUE_REF_PRESENT
1184     // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1185     void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
1186         tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
1187         internal_move(std::move(other));
1188     }
1189 
1190     void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
1191         if (this->my_allocator == other.my_allocator) {
1192             internal_move(std::move(other));
1193         } else {
1194             // do per-element move
1195             internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1196         }
1197     }
1198 #endif
1199 
1200     //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
1201     /** Return pointer to item with given key, or NULL if no such item exists.
1202         Must not be called concurrently with erasure operations. */
1203     const_pointer internal_fast_find( const Key& key ) const {
1204         hashcode_t h = my_hash_compare.hash( key );
1205         hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1206         node *n;
1207     restart:
1208         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209         bucket *b = get_bucket( h & m );
1210         // TODO: actually, notification is unnecessary here, just hiding double-check
1211         if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
1212         {
1213             bucket::scoped_t lock;
1214             if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1215                 if( b->node_list == internal::rehash_req)
1216                     const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1217             }
1218             else lock.acquire( b->mutex, /*write=*/false );
1219             __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
1220         }
1221         n = search_bucket( key, b );
1222         if( n )
1223             return n->storage();
1224         else if( check_mask_race( h, m ) )
1225             goto restart;
1226         return 0;
1227     }
1228 };
1229 
1230 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1231 namespace internal {
1232 using namespace tbb::internal;
1233 
1234 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1235 using hash_map_t = Map<
1236     Key, T,
1237     std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1238                         pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1239     std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1240                          pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1241 >;
1242 }
1243 
1244 // Deduction guide for the constructor from two iterators and hash_compare and/or allocator
1245 template<typename I, typename... Args>
1246 concurrent_hash_map(I, I, Args...)
1247 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1248 
1249 // Deduction guide for the constructor from an initializer_list and hash_compare or allocator
1250 // The deduction guide for an initializer_list, hash_compare and allocator is implicit
1251 template<typename Key, typename T, typename CompareOrAllocator>
1252 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1253 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1254 
1255 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
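
// Illustrative usage sketch (comment only): with C++17 class template argument deduction
// the guides above deduce the key and mapped types from an iterator range (the trailing
// argument, if any, may be a hash_compare or an allocator). The container below is an
// assumption for the example (assumes <vector> and <string> are included).
//
//     std::vector<std::pair<int, std::string> > v;
//     tbb::concurrent_hash_map m( v.begin(), v.end() );
//     // deduces tbb::concurrent_hash_map<int, std::string> with the default
//     // hash_compare and allocator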
1256 
1257 template<typename Key, typename T, typename HashCompare, typename A>
1258 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1259     __TBB_ASSERT( !result || !result->my_node, NULL );
1260     bool return_value;
1261     hashcode_t const h = my_hash_compare.hash( key );
1262     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1263     segment_index_t grow_segment = 0;
1264     node *n;
1265     restart:
1266     {//lock scope
1267         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268         return_value = false;
1269         // get bucket
1270         bucket_accessor b( this, h & m );
1271 
1272         // find a node
1273         n = search_bucket( key, b() );
1274         if( op_insert ) {
1275             // [opt] insert a key
1276             if( !n ) {
1277                 if( !tmp_n ) {
1278                     tmp_n = allocate_node(my_allocator, key, t);
1279                 }
1280                 if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1281                     // Rerun search_list, in case another thread inserted the item during the upgrade.
1282                     n = search_bucket( key, b() );
1283                     if( is_valid(n) ) { // unfortunately, it did
1284                         b.downgrade_to_reader();
1285                         goto exists;
1286                     }
1287                 }
1288                 if( check_mask_race(h, m) )
1289                     goto restart; // b.release() is done in ~b().
1290                 // insert and set flag to grow the container
1291                 grow_segment = insert_new_node( b(), n = tmp_n, m );
1292                 tmp_n = 0;
1293                 return_value = true;
1294             }
1295         } else { // find or count
1296             if( !n ) {
1297                 if( check_mask_race( h, m ) )
1298                     goto restart; // b.release() is done in ~b(). TODO: replace by continue
1299                 return false;
1300             }
1301             return_value = true;
1302         }
1303     exists:
1304         if( !result ) goto check_growth;
1305         // TODO: the following looks like a generic/regular operation
1306         // acquire the item
1307         if( !result->try_acquire( n->mutex, write ) ) {
1308             for( tbb::internal::atomic_backoff backoff(true);; ) {
1309                 if( result->try_acquire( n->mutex, write ) ) break;
1310                 if( !backoff.bounded_pause() ) {
1311                     // the wait takes really long, restart the operation
1312                     b.release();
1313                     __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1314                     __TBB_Yield();
1315                     m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1316                     goto restart;
1317                 }
1318             }
1319         }
1320     }//lock scope
1321     result->my_node = n;
1322     result->my_hash = h;
1323 check_growth:
1324     // [opt] grow the container
1325     if( grow_segment ) {
1326 #if __TBB_STATISTICS
1327         my_info_resizes++; // concurrent ones
1328 #endif
1329         enable_segment( grow_segment, my_allocator );
1330     }
1331     if( tmp_n ) // if op_insert only
1332         delete_node( tmp_n );
1333     return return_value;
1334 }
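
// Illustrative usage sketch (comment only): lookup() above is the common back end of the
// public find() and insert() families; callers differ only in whether insertion is
// requested and which accessor (read or write lock) is passed. The map type and key are
// assumptions for the example.
//
//     tbb::concurrent_hash_map<int, int> table;
//     tbb::concurrent_hash_map<int, int>::accessor a;        // grants a write lock
//     table.insert( a, 5 );                                  // op_insert==true; creates the element if absent
//     a->second = 42;
//     a.release();
//     tbb::concurrent_hash_map<int, int>::const_accessor ca; // grants a read lock
//     if( table.find( ca, 5 ) ) {                            // op_insert==false
//         int v = ca->second;                                // read while 'ca' is alive
//         (void)v;
//     }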
1335 
1336 template<typename Key, typename T, typename HashCompare, typename A>
1337 template<typename I>
1338 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1339     hashcode_t h = my_hash_compare.hash( key );
1340     hashcode_t m = my_mask;
1341     __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1342     h &= m;
1343     bucket *b = get_bucket( h );
1344     while( b->node_list == internal::rehash_req ) {
1345         m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1346         b = get_bucket( h &= m );
1347     }
1348     node *n = search_bucket( key, b );
1349     if( !n )
1350         return std::make_pair(end_, end_);
1351     iterator lower(*this, h, b, n), upper(lower);
1352     return std::make_pair(lower, ++upper);
1353 }
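
// Illustrative usage sketch (comment only): internal_equal_range() above implements the
// public equal_range() members, which return a range containing at most one element.
// The contents are assumptions for the example.
//
//     tbb::concurrent_hash_map<int, int> table;
//     table.insert( std::make_pair(7, 70) );
//     std::pair<tbb::concurrent_hash_map<int, int>::iterator,
//               tbb::concurrent_hash_map<int, int>::iterator> r = table.equal_range( 7 );
//     // r.first points at the single matching element, r.second is one past it;
//     // for a missing key the result is the empty range (end(), end()).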
1354 
1355 template<typename Key, typename T, typename HashCompare, typename A>
1356 bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
1357     __TBB_ASSERT( item_accessor.my_node, NULL );
1358     node_base *const n = item_accessor.my_node;
1359     hashcode_t const h = item_accessor.my_hash;
1360     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1361     do {
1362         // get bucket
1363         bucket_accessor b( this, h & m, /*writer=*/true );
1364         node_base **p = &b()->node_list;
1365         while( *p && *p != n )
1366             p = &(*p)->next;
1367         if( !*p ) { // someone else was first
1368             if( check_mask_race( h, m ) )
1369                 continue;
1370             item_accessor.release();
1371             return false;
1372         }
1373         __TBB_ASSERT( *p == n, NULL );
1374         *p = n->next; // remove from container
1375         my_size--;
1376         break;
1377     } while(true);
1378     if( !item_accessor.is_writer() ) // need to get exclusive lock
1379         item_accessor.upgrade_to_writer(); // return value means nothing here
1380     item_accessor.release();
1381     delete_node( n ); // Only one thread can delete it
1382     return true;
1383 }
1384 
1385 template<typename Key, typename T, typename HashCompare, typename A>
1386 bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
1387     node_base *n;
1388     hashcode_t const h = my_hash_compare.hash( key );
1389     hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1390 restart:
1391     {//lock scope
1392         // get bucket
1393         bucket_accessor b( this, h & m );
1394     search:
1395         node_base **p = &b()->node_list;
1396         n = *p;
1397         while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1398             p = &n->next;
1399             n = *p;
1400         }
1401         if( !n ) { // not found, but mask could be changed
1402             if( check_mask_race( h, m ) )
1403                 goto restart;
1404             return false;
1405         }
1406         else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407             if( check_mask_race( h, m ) ) // contended upgrade, check mask
1408                 goto restart;
1409             goto search;
1410         }
1411         *p = n->next;
1412         my_size--;
1413     }
1414     {
1415         typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1416     }
1417     // note: there should be no threads attempting to acquire this mutex again; do not try to upgrade const_accessor!
1418     delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1419     return true;
1420 }
1421 
1422 template<typename Key, typename T, typename HashCompare, typename A>
1423 void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
1424     typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1425     if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1426         using std::swap;
1427         tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1428         swap(this->my_hash_compare, table.my_hash_compare);
1429         internal_swap(table);
1430     }
1431 }
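
// Illustrative usage sketch (comment only): swap() above exchanges the contents in O(1)
// when the allocators propagate on swap or compare equal; per the guard above it does
// nothing otherwise. Names are assumptions for the example.
//
//     tbb::concurrent_hash_map<int, int> a, b;
//     a.insert( std::make_pair(1, 10) );
//     a.swap( b );     // now b holds {1, 10} and a is empty
//     swap( a, b );    // the free swap() later in this header forwards to the member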
1432 
1433 template<typename Key, typename T, typename HashCompare, typename A>
1434 void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
1435     reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1436     hashcode_t mask = my_mask;
1437     hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1438     __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1439     bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1440     for(; b <= mask; b++, bp++ ) {
1441         node_base *n = bp->node_list;
1442         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1443         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1444         if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1445             hashcode_t h = b; bucket *b_old = bp;
1446             do {
1447                 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1448                 hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1449                 b_old = get_bucket( h &= m );
1450             } while( b_old->node_list == internal::rehash_req );
1451             // now h is the index of the root rehashed bucket b_old
1452             mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1453             for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1454                 hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1455                 if( (c & mask) != h ) { // should be rehashed
1456                     *p = q->next; // exclude from b_old
1457                     bucket *b_new = get_bucket( c & mask );
1458                     __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1459                     add_to_bucket( b_new, q );
1460                 } else p = &q->next; // iterate to next item
1461             }
1462         }
1463     }
1464 #if TBB_USE_PERFORMANCE_WARNINGS
1465     int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1466     static bool reported = false;
1467 #endif
1468 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469     for( b = 0; b <= mask; b++ ) { // check every bucket for consistency and gather statistics
1470         if( b & (b-2) ) ++bp; // not the beginning of a segment
1471         else bp = get_bucket( b );
1472         node_base *n = bp->node_list;
1473         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1474         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1475 #if TBB_USE_PERFORMANCE_WARNINGS
1476         if( n == internal::empty_rehashed ) empty_buckets++;
1477         else if( n->next ) overpopulated_buckets++;
1478 #endif
1479 #if TBB_USE_ASSERT
1480         for( ; is_valid(n); n = n->next ) {
1481             hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1482             __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1483         }
1484 #endif
1485     }
1486 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487 #if TBB_USE_PERFORMANCE_WARNINGS
1488     if( buckets > current_size) empty_buckets -= buckets - current_size;
1489     else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1490     if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1491         tbb::internal::runtime_warning(
1492             "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
1493 #if __TBB_USE_OPTIONAL_RTTI
1494             typeid(*this).name(),
1495 #else
1496             "concurrent_hash_map",
1497 #endif
1498             current_size, empty_buckets, overpopulated_buckets );
1499         reported = true;
1500     }
1501 #endif
1502 }
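
// Illustrative usage sketch (comment only): rehash() above grows the bucket array to the
// requested size (if larger) and completes any pending lazy, per-bucket rehashing. The
// size is an assumption for the example.
//
//     tbb::concurrent_hash_map<int, int> table;
//     table.rehash( 1024 );   // pre-size the table and finish deferred rehashing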
1503 
1504 template<typename Key, typename T, typename HashCompare, typename A>
1505 void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
1506     hashcode_t m = my_mask;
1507     __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510     int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1511     static bool reported = false;
1512 #endif
1513     bucket *bp = 0;
1514     // check consistency
1515     for( segment_index_t b = 0; b <= m; b++ ) {
1516         if( b & (b-2) ) ++bp; // not the beginning of a segment
1517         else bp = get_bucket( b );
1518         node_base *n = bp->node_list;
1519         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1520         __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1522         if( n == internal::empty_rehashed ) empty_buckets++;
1523         else if( n == internal::rehash_req ) buckets--;
1524         else if( n->next ) overpopulated_buckets++;
1525 #endif
1526 #if __TBB_EXTRA_DEBUG
1527         for(; is_valid(n); n = n->next ) {
1528             hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1529             h &= m;
1530             __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1531         }
1532 #endif
1533     }
1534 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535 #if __TBB_STATISTICS
1536     printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537         " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538         current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539         unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540     my_info_resizes = 0; // concurrent ones
1541     my_info_restarts = 0; // race collisions
1542     my_info_rehashes = 0;  // invocations of rehash_bucket
1543 #endif
1544     if( buckets > current_size) empty_buckets -= buckets - current_size;
1545     else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1546     if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1547         tbb::internal::runtime_warning(
1548             "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
1549 #if __TBB_USE_OPTIONAL_RTTI
1550             typeid(*this).name(),
1551 #else
1552             "concurrent_hash_map",
1553 #endif
1554             current_size, empty_buckets, overpopulated_buckets );
1555         reported = true;
1556     }
1557 #endif
1558 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1559     my_size = 0;
1560     segment_index_t s = segment_index_of( m );
1561     __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1562     do {
1563         __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564         segment_ptr_t buckets_ptr = my_table[s];
1565         size_type sz = segment_size( s ? s : 1 );
1566         for( segment_index_t i = 0; i < sz; i++ )
1567             for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568                 buckets_ptr[i].node_list = n->next;
1569                 delete_node( n );
1570             }
1571         delete_segment(s, my_allocator);
1572     } while(s-- > 0);
1573     my_mask = embedded_buckets - 1;
1574 }
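
// Illustrative usage sketch (comment only): clear() above deletes every node and segment
// and shrinks the table back to the embedded buckets; as the asserts above indicate, it
// must not run concurrently with other operations on the same table. Names are
// assumptions for the example.
//
//     tbb::concurrent_hash_map<int, int> table;
//     table.insert( std::make_pair(1, 10) );
//     table.clear();           // table.empty() is now true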
1575 
1576 template<typename Key, typename T, typename HashCompare, typename A>
1577 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
1578     hashcode_t mask = source.my_mask;
1579     if( my_mask == mask ) { // optimized version
1580         reserve( source.my_size, my_allocator ); // TODO: load_factor?
1581         bucket *dst = 0, *src = 0;
1582         bool rehash_required = false;
1583         for( hashcode_t k = 0; k <= mask; k++ ) {
1584             if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1585             else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1586             __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1587             node *n = static_cast<node*>( src->node_list );
1588             if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1589                 rehash_required = true;
1590                 dst->node_list = internal::rehash_req;
1591             } else for(; n; n = static_cast<node*>( n->next ) ) {
1592                 node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593                 add_to_bucket( dst, node_ptr);
1594                 ++my_size; // TODO: replace by non-atomic op
1595             }
1596         }
1597         if( rehash_required ) rehash();
1598     } else internal_copy( source.begin(), source.end(), source.my_size );
1599 }
1600 
1601 template<typename Key, typename T, typename HashCompare, typename A>
1602 template<typename I>
1603 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
1604     reserve( reserve_size, my_allocator ); // TODO: load_factor?
1605     hashcode_t m = my_mask;
1606     for(; first != last; ++first) {
1607         hashcode_t h = my_hash_compare.hash( (*first).first );
1608         bucket *b = get_bucket( h & m );
1609         __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1610         node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611         add_to_bucket( b, node_ptr );
1612         ++my_size; // TODO: replace by non-atomic op
1613     }
1614 }
1615 
1616 } // namespace interface5
1617 
1618 using interface5::concurrent_hash_map;
1619 
1620 
1621 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1622 inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1623     if(a.size() != b.size()) return false;
1624     typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1625     typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1626     for(; i != i_end; ++i) {
1627         j = b.equal_range(i->first).first;
1628         if( j == j_end || !(i->second == j->second) ) return false;
1629     }
1630     return true;
1631 }
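
// Illustrative usage sketch (comment only): operator==() above compares the two tables as
// sets of key/value pairs via equal_range(), independent of bucket layout or allocator
// type. Contents are assumptions for the example.
//
//     tbb::concurrent_hash_map<int, int> a, b;
//     a.insert( std::make_pair(1, 10) );
//     b.insert( std::make_pair(1, 10) );
//     bool same = (a == b);    // true
//     b.insert( std::make_pair(2, 20) );
//     same = (a != b);         // true: the sizes differ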
1632 
1633 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1634 inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1635 {    return !(a == b); }
1636 
1637 template<typename Key, typename T, typename HashCompare, typename A>
1638 inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1639 {    a.swap( b ); }
1640 
1641 #if _MSC_VER && !defined(__INTEL_COMPILER)
1642     #pragma warning( pop )
1643 #endif // warning 4127 is back
1644 
1645 } // namespace tbb
1646 
1647 #include "internal/_warning_suppress_disable_notice.h"
1648 #undef __TBB_concurrent_hash_map_H_include_area
1649 
1650 #endif /* __TBB_concurrent_hash_map_H */
1651