1 /*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 */
16
17 #ifndef __TBB_concurrent_vector_H
18 #define __TBB_concurrent_vector_H
19
20 #define __TBB_concurrent_vector_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22
23 #include "tbb_stddef.h"
24 #include "tbb_exception.h"
25 #include "atomic.h"
26 #include "cache_aligned_allocator.h"
27 #include "blocked_range.h"
28 #include "tbb_machine.h"
29 #include "tbb_profiling.h"
30 #include <new>
31 #include <cstring> // for memset()
32 #include __TBB_STD_SWAP_HEADER
33 #include <algorithm>
34 #include <iterator>
35
36 #include "internal/_allocator_traits.h"
37
38 #if _MSC_VER==1500 && !__INTEL_COMPILER
39 // VS2008/VC9 seems to have an issue; limits pull in math.h
40 #pragma warning( push )
41 #pragma warning( disable: 4985 )
42 #endif
43 #include <limits> /* std::numeric_limits */
44 #if _MSC_VER==1500 && !__INTEL_COMPILER
45 #pragma warning( pop )
46 #endif
47
48 #if __TBB_INITIALIZER_LISTS_PRESENT
49 #include <initializer_list>
50 #endif
51
52 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
53 // Workaround for overzealous compiler warnings in /Wp64 mode
54 #pragma warning (push)
55 #if defined(_Wp64)
56 #pragma warning (disable: 4267)
57 #endif
58 #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
59 #endif
60
61 namespace tbb {
62
63 template<typename T, class A = cache_aligned_allocator<T> >
64 class concurrent_vector;
65
66 //! @cond INTERNAL
67 namespace internal {
68
69 template<typename Container, typename Value>
70 class vector_iterator;
71
72 //! Bad allocation marker
73 static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
74
75 //! Exception helper function
template<typename T>
//! Zero-fills a range of raw (unconstructed) elements.
/** Used after an exception: per the class requirements (see concurrent_vector
    docs), T's destructor must work safely on zero-initialized memory, so the
    later destroy pass can run over these slots. */
void handle_unconstructed_elements(T* array, size_t n_of_elements){
    std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
}
80
81 //! Base class of concurrent vector implementation.
82 /** @ingroup containers */
class concurrent_vector_base_v3 {
protected:

    // Basic types declarations
    typedef size_t segment_index_t;
    typedef size_t size_type;

    // Using enumerations due to Mac linking problems of static const variables
    enum {
        // Size constants
        default_initial_segments = 1, // 2 initial items
        //! Number of slots for segment pointers inside the class
        pointers_per_short_table = 3, // to fit into 8 words of entire structure
        pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
    };

    // Tag types naming the three possible states of a segment slot.
    struct segment_not_used {};
    struct segment_allocated {};
    struct segment_allocation_failed {};

    class segment_t;
    //! Non-atomic snapshot of a segment slot, comparable against the state tags above.
    class segment_value_t {
        void* array;
    private:
        //TODO: More elegant way to grant access to selected functions _only_?
        friend class segment_t;
        explicit segment_value_t(void* an_array):array(an_array) {}
    public:
        // State is encoded in the pointer value: 0 == not used,
        // vector_allocation_error_flag == allocation failed, anything greater
        // is a real array pointer (allocated).
        friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
        friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}
        friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}
        template<typename argument_type>
        friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}

        template<typename T>
        T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
    };

    //! Throws the given exception unless the segment is in the "allocated" state.
    friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
        if(s != segment_allocated()){
            internal::throw_exception(exception);
        }
    }

    // Segment pointer.
    class segment_t {
        atomic<void*> array;
    public:
        segment_t(){ store<relaxed>(segment_not_used());}
        //Copy ctor and assignment operator are defined to ease using of stl algorithms.
        //These algorithms usually not a synchronization point, so, semantic is
        //intentionally relaxed here.
        segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}

        void swap(segment_t & rhs ){
            tbb::internal::swap<relaxed>(array, rhs.array);
        }

        segment_t& operator=(segment_t const& rhs ){
            array.store<relaxed>(rhs.array.load<relaxed>());
            return *this;
        }

        template<memory_semantics M>
        segment_value_t load() const { return segment_value_t(array.load<M>());}

        //! Mark the slot as never allocated.
        template<memory_semantics M>
        void store(segment_not_used) {
            array.store<M>(0);
        }

        //! Mark the slot as failed; valid only from the not-used state.
        template<memory_semantics M>
        void store(segment_allocation_failed) {
            __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
            array.store<M>(internal::vector_allocation_error_flag);
        }

        //! Publish a successfully allocated segment array.
        template<memory_semantics M>
        void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
            __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
                    "other overloads of store should be used for marking segment as not_used or allocation_failed" );
            array.store<M>(allocated_segment_pointer);
        }

#if TBB_USE_ASSERT
        ~segment_t() {
            __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
        }
#endif /* TBB_USE_ASSERT */
    };
    friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);

    // Data fields

    //! allocator function pointer
    void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);

    //! count of segments in the first block
    atomic<size_type> my_first_block;

    //! Requested size of vector
    atomic<size_type> my_early_size;

    //! Pointer to the segments table
    atomic<segment_t*> my_segment;

    //! embedded storage of segment pointers
    segment_t my_storage[pointers_per_short_table];

    // Methods

    concurrent_vector_base_v3() {
        //Here the semantic is intentionally relaxed.
        //The reason this is next:
        //Object that is in middle of construction (i.e. its constructor is not yet finished)
        //cannot be used concurrently until the construction is finished.
        //Thus to flag other threads that construction is finished, some synchronization with
        //acquire-release semantic should be done by the (external) code that uses the vector.
        //So, no need to do the synchronization inside the vector.

        my_early_size.store<relaxed>(0);
        my_first_block.store<relaxed>(0); // here is not default_initial_segments
        my_segment.store<relaxed>(my_storage);
    }

    __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();

    //These helper methods rely on segments being allocated with sizes that are
    //increasing powers of 2, with one exception: segment 0 has size 2, the same
    //as segment 1. E.g. the segment with index 3 holds 2^3 = 8 elements.

    //! Index of the segment containing the element with the given index.
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! Element index at which segment k starts.
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! Returns the segment index; rewrites 'index' to the offset within that segment.
    static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
        segment_index_t k = segment_index_of( index );
        index -= segment_base(k);
        return k;
    }

    //! Capacity of segment k.
    static size_type segment_size( segment_index_t k ) {
        return segment_index_t(1)<<k; // fake value for k==0
    }


    static bool is_first_element_in_segment(size_type element_index){
        //check if element_index is a power of 2 that is at least 2.
        //The idea is to detect if the iterator crosses a segment boundary,
        //and 2 is the minimal index for which it's true
        __TBB_ASSERT(element_index, "there should be no need to call "
                                    "is_first_element_in_segment for 0th element" );
        return is_power_of_two_at_least( element_index, 2 );
    }

    //! An operation on an n-element array starting at begin.
    typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );

    //! An operation on n-element destination array and n-element source array.
    typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );

    //! Internal structure for compact()
    struct internal_segments_table {
        segment_index_t first_block;
        segment_t table[pointers_per_long_table];
    };

    // Type-agnostic engine implemented in the TBB library; element-specific
    // behavior is injected through the internal_array_op1/op2 callbacks.
    void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
    size_type __TBB_EXPORTED_METHOD internal_capacity() const;
    void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
    size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
    void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
    segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
    void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
    void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
    void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
                           internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
    //! Obsolete
    void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
    void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);

    void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
                            internal_array_op1 destroy, internal_array_op2 init );
    size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );

    //! Deprecated entry point for backwards compatibility to TBB 2.1.
    void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
private:
    //! Private functionality
    class helper;
    friend class helper;

    template<typename Container, typename Value>
    friend class vector_iterator;

};
283
//! ADL-visible swap for segment slots; delegates to the relaxed member swap.
inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
    lhs.swap(rhs);
}
287
288 typedef concurrent_vector_base_v3 concurrent_vector_base;
289
//! Meets requirements of a random access iterator for STL and a Value for a blocked_range.
291 /** Value is either the T or const T type of the container.
292 @ingroup containers */
template<typename Container, typename Value>
class vector_iterator
{
    //! concurrent_vector over which we are iterating.
    Container* my_vector;

    //! Index into the vector
    size_t my_index;

    //! Caches my_vector->internal_subscript(my_index)
    /** NULL if cached value is not available */
    mutable Value* my_item;

    template<typename C, typename T>
    friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

    template<typename C, typename U>
    friend class internal::vector_iterator;

#if !__TBB_TEMPLATE_FRIENDS_BROKEN
    template<typename T, class A>
    friend class tbb::concurrent_vector;
#else
public:
#endif

    //! Constructor used by the container and by iterator arithmetic.
    /** ptr, when non-NULL, pre-seeds the element cache. */
    vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
        my_vector(const_cast<Container*>(&vector)),
        my_index(index),
        my_item(static_cast<Value*>(ptr))
    {}

public:
    //! Default constructor
    vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}

    //! Converting constructor: iterator -> const_iterator (identity for iterator).
    vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
        my_vector(other.my_vector),
        my_index(other.my_index),
        my_item(other.my_item)
    {}

    vector_iterator& operator=( const vector_iterator<Container,typename Container::value_type>& other )
    {
        my_vector=other.my_vector;
        my_index=other.my_index;
        my_item=other.my_item;
        return *this;
    }

    vector_iterator operator+( ptrdiff_t offset ) const {
        return vector_iterator( *my_vector, my_index+offset );
    }
    vector_iterator &operator+=( ptrdiff_t offset ) {
        my_index+=offset;
        my_item = NULL; // an arbitrary jump may cross a segment boundary: drop the cache
        return *this;
    }
    vector_iterator operator-( ptrdiff_t offset ) const {
        return vector_iterator( *my_vector, my_index-offset );
    }
    vector_iterator &operator-=( ptrdiff_t offset ) {
        my_index-=offset;
        my_item = NULL; // an arbitrary jump may cross a segment boundary: drop the cache
        return *this;
    }
    //! Dereference; lazily (re)fills the cached element pointer.
    Value& operator*() const {
        Value* item = my_item;
        if( !item ) {
            item = my_item = &my_vector->internal_subscript(my_index);
        }
        __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
        return *item;
    }
    Value& operator[]( ptrdiff_t k ) const {
        return my_vector->internal_subscript(my_index+k);
    }
    Value* operator->() const {return &operator*();}

    //! Pre increment
    vector_iterator& operator++() {
        size_t element_index = ++my_index;
        if( my_item ) {
            //TODO: consider using of knowledge about "first_block optimization" here as well?
            if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
                //if the iterator crosses a segment boundary, the pointer become invalid
                //as possibly next segment is in another memory location
                my_item= NULL;
            } else {
                ++my_item;
            }
        }
        return *this;
    }

    //! Pre decrement
    vector_iterator& operator--() {
        __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
        // Note: element_index is the value *before* the decrement — crossing
        // backwards over the first element of a segment invalidates the cache.
        size_t element_index = my_index--;
        if( my_item ) {
            if(concurrent_vector_base::is_first_element_in_segment(element_index)) {
                //if the iterator crosses a segment boundary, the pointer become invalid
                //as possibly next segment is in another memory location
                my_item= NULL;
            } else {
                --my_item;
            }
        }
        return *this;
    }

    //! Post increment
    vector_iterator operator++(int) {
        vector_iterator result = *this;
        operator++();
        return result;
    }

    //! Post decrement
    vector_iterator operator--(int) {
        vector_iterator result = *this;
        operator--();
        return result;
    }

    // STL support

    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};
435
436 template<typename Container, typename T>
437 vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
438 return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
439 }
440
441 template<typename Container, typename T, typename U>
442 bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
443 return i.my_index==j.my_index && i.my_vector == j.my_vector;
444 }
445
//! Defined through operator== so the two relations can never disagree.
template<typename Container, typename T, typename U>
bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
    return !(i==j);
}
450
451 template<typename Container, typename T, typename U>
452 bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
453 return i.my_index<j.my_index;
454 }
455
//! Derived from operator< by argument swap.
template<typename Container, typename T, typename U>
bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
    return j<i;
}
460
//! Derived from operator< by negation.
template<typename Container, typename T, typename U>
bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
    return !(i<j);
}
465
//! Derived from operator< by swap and negation.
template<typename Container, typename T, typename U>
bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
    return !(j<i);
}
470
471 template<typename Container, typename T, typename U>
472 ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
473 return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
474 }
475
//! Base class holding the allocator instance, rebound to the element type.
template<typename T, class A>
class allocator_base {
public:
    //! A rebound to allocate T.
    typedef typename tbb::internal::allocator_rebind<A, T>::type allocator_type;
    allocator_type my_allocator;
    allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
};
483
484 } // namespace internal
485 //! @endcond
486
487 //! Concurrent vector container
488 /** concurrent_vector is a container having the following main properties:
489 - It provides random indexed access to its elements. The index of the first element is 0.
490 - It ensures safe concurrent growing its size (different threads can safely append new elements).
491 - Adding new elements does not invalidate existing iterators and does not change indices of existing items.
492
493 @par Compatibility
494 The class meets all Container Requirements and Reversible Container Requirements from
495 C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet
496 Sequence Requirements due to absence of insert() and erase() methods.
497
498 @par Exception Safety
499 Methods working with memory allocation and/or new elements construction can throw an
500 exception if allocator fails to allocate memory or element's default constructor throws one.
501 Concurrent vector's element of type T must conform to the following requirements:
502 - Throwing an exception is forbidden for destructor of T.
503 - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized.
504 .
505 Otherwise, the program's behavior is undefined.
506 @par
507 If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation.
508 Invalid state means:
509 - There are no guarantees that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens.
510 - An invalid vector instance cannot be repaired; it is unable to grow anymore.
511 - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful.
512 - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown.
513 .
514 If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails.
515
516 @par Fragmentation
517 Unlike an STL vector, a concurrent_vector does not move existing elements if it needs
518 to allocate more memory. The container is divided into a series of contiguous arrays of
519 elements. The first reservation, growth, or assignment operation determines the size of
520 the first array. Using small number of elements as initial size incurs fragmentation that
521 may increase element access time. Internal layout can be optimized by method compact() that
522 merges several smaller arrays into one solid.
523
524 @par Changes since TBB 2.1
525 - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated.
526 - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector
527 - Added resize() methods (not thread-safe)
528 - Added cbegin/cend/crbegin/crend methods
529 - Changed return type of methods grow* and push_back to iterator
530
531 @par Changes since TBB 2.0
532 - Implemented exception-safety guarantees
533 - Added template argument for allocator
534 - Added allocator argument in constructors
535 - Faster index calculation
536 - First growth call specifies a number of segments to be merged in the first allocation.
537 - Fixed memory blow up for swarm of vector's instances of small size
538 - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items.
539 - Added STL-like constructors.
540 - Added operators ==, < and derivatives
541 - Added at() method, approved for using after an exception was thrown inside the vector
542 - Added get_allocator() method.
543 - Added assign() methods
544 - Added compact() method to defragment first segments
545 - Added swap() method
546 - range() defaults on grainsize = 1 supporting auto grainsize algorithms.
547
548 @ingroup containers */
549 template<typename T, class A>
550 class concurrent_vector: protected internal::allocator_base<T, A>,
551 private internal::concurrent_vector_base {
552 private:
    //! blocked_range adaptor exposing the container typedefs expected by
    //! parallel algorithms; instantiated as range_type / const_range_type.
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        //! Conversion between the iterator and const_iterator range flavors.
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        //! Splitting constructor for parallel subdivision.
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };
566
567 template<typename C, typename U>
568 friend class internal::vector_iterator;
569
570 public:
571 //------------------------------------------------------------------------
572 // STL compatible types
573 //------------------------------------------------------------------------
574 typedef internal::concurrent_vector_base_v3::size_type size_type;
575 typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
576
577 typedef T value_type;
578 typedef ptrdiff_t difference_type;
579 typedef T& reference;
580 typedef const T& const_reference;
581 typedef T *pointer;
582 typedef const T *const_pointer;
583
584 typedef internal::vector_iterator<concurrent_vector,T> iterator;
585 typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
586
587 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
588 // Assume ISO standard definition of std::reverse_iterator
589 typedef std::reverse_iterator<iterator> reverse_iterator;
590 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
591 #else
592 // Use non-standard std::reverse_iterator
593 typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
594 typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
595 #endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
596
597 //------------------------------------------------------------------------
598 // Parallel algorithm support
599 //------------------------------------------------------------------------
600 typedef generic_range_type<iterator> range_type;
601 typedef generic_range_type<const_iterator> const_range_type;
602
603 //------------------------------------------------------------------------
604 // STL compatible constructors & destructors
605 //------------------------------------------------------------------------
606
607 //! Construct empty vector.
    explicit concurrent_vector(const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        // Install the allocation callback used by the type-agnostic base class.
        vector_allocator_ptr = &internal_allocator;
    }
613
614 //Constructors are not required to have synchronization
615 //(for more details see comment in the concurrent_vector_base constructor).
616 #if __TBB_INITIALIZER_LISTS_PRESENT
617 //! Constructor from initializer_list
618 concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
619 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
620 {
621 vector_allocator_ptr = &internal_allocator;
622 __TBB_TRY {
623 internal_assign_iterators(init_list.begin(), init_list.end());
__TBB_CATCH(...)624 } __TBB_CATCH(...) {
625 segment_t *table = my_segment.load<relaxed>();;
626 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
627 __TBB_RETHROW();
628 }
629
630 }
631 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
632
633 //! Copying constructor
    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector, sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            // Destroy constructed elements and free every segment before
            // rethrowing: the destructor will not run after a throwing ctor.
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
            __TBB_RETHROW();
        }
    }
646
647 #if __TBB_CPP11_RVALUE_REF_PRESENT
648 //! Move constructor
649 //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
    concurrent_vector( concurrent_vector&& source)
        : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        // Take over source's segments by swapping them with the freshly
        // default-constructed (empty) state of *this.
        concurrent_vector_base_v3::internal_swap(source);
    }
656
    concurrent_vector( concurrent_vector&& source, const allocator_type& a)
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        //C++ standard requires instances of an allocator being compared for equality,
        //which means that memory allocated by one instance is possible to deallocate with the other one.
        if (a == source.my_allocator) {
            // Equal allocators: O(1) transfer of the whole segment table.
            concurrent_vector_base_v3::internal_swap(source);
        } else {
            // Unequal allocators: move element-by-element into our own memory.
            __TBB_TRY {
                internal_copy(source, sizeof(T), &move_array);
            } __TBB_CATCH(...) {
                segment_t *table = my_segment.load<relaxed>();
                internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
                __TBB_RETHROW();
            }
        }
    }
675
676 #endif
677
678 //! Copying constructor for vector with different allocator type
    template<class M>
    __TBB_DEPRECATED concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            // Clean up the partially built vector before rethrowing.
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
692
693 //! Construction with initial size specified by argument n
    explicit concurrent_vector(size_type n)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            // src == NULL with initialize_array requests default construction of n elements.
            internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
705
706 //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
    concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            // t is passed through as src; initialize_array_by copy-constructs each element from it.
            internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
719
720 //! Construction with copying iteration range and given allocator instance
    template<class I>
    concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            // Tag dispatch on is_integer distinguishes the (n, value) call form
            // from a genuine iterator range, as std::vector's range ctor does.
            internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }
734
735 //! Assignment
736 concurrent_vector& operator=( const concurrent_vector& vector ) {
737 if( this != &vector )
738 internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array);
739 return *this;
740 }
741
742 #if __TBB_CPP11_RVALUE_REF_PRESENT
743 //TODO: add __TBB_NOEXCEPT()
744 //! Move assignment
    concurrent_vector& operator=( concurrent_vector&& other ) {
        __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
        if(pocma_t::value || this->my_allocator == other.my_allocator) {
            // O(1) path: park current contents in a temporary (destroyed at
            // scope exit) and take over other's segment table wholesale.
            concurrent_vector trash (std::move(*this));
            internal_swap(other);
            tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
        } else {
            // Allocators differ and do not propagate: move element-by-element.
            internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
        }
        return *this;
    }
757 #endif
758 //TODO: add an template assignment operator? (i.e. with different element type)
759
760 //! Assignment for vector with different allocator type
    template<class M>
    __TBB_DEPRECATED concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
        // Addresses are compared through void* because the operand is a different type.
        if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
            internal_assign(vector.internal_vector_base(),
                sizeof(T), &destroy_array, &assign_array, &copy_array);
        return *this;
    }
768
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment for initializer_list
    /** Not thread-safe: clears all existing elements, then copies from the list. */
    concurrent_vector& operator=( std::initializer_list<T> init_list ) {
        internal_clear(&destroy_array);
        internal_assign_iterators(init_list.begin(), init_list.end());
        return *this;
    }
#endif //#if __TBB_INITIALIZER_LISTS_PRESENT
777
778 //------------------------------------------------------------------------
779 // Concurrent operations
780 //------------------------------------------------------------------------
    //! Grow by "delta" elements.
    /** Returns iterator pointing to the first new element.
        Thread-safe; new elements are default-constructed. For delta==0 no growth
        occurs and the iterator refers to the current published size. */
    iterator grow_by( size_type delta ) {
        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
    }
786
    //! Grow by "delta" elements using copying constructor.
    /** Returns iterator pointing to the first new element.
        Thread-safe; each new element is copy-constructed from t. */
    iterator grow_by( size_type delta, const_reference t ) {
        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
    }
792
793 /** Returns iterator pointing to the first new element. */
794 template<typename I>
grow_by(I first,I last)795 iterator grow_by( I first, I last ) {
796 typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
797 __TBB_ASSERT( delta >= 0, NULL);
798
799 return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), ©_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
800 }
801
#if __TBB_INITIALIZER_LISTS_PRESENT
    /** Grows the vector by copying the elements of init_list.
        Returns iterator pointing to the first new element. Thread-safe. */
    iterator grow_by( std::initializer_list<T> init_list ) {
        return grow_by( init_list.begin(), init_list.end() );
    }
#endif //#if __TBB_INITIALIZER_LISTS_PRESENT
808
809 //! Append minimal sequence of elements such that size()>=n.
810 /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
811 May return while other elements are being constructed by other threads.
812 Returns iterator that points to beginning of appended sequence.
813 If no elements were appended, returns iterator pointing to nth element. */
grow_to_at_least(size_type n)814 iterator grow_to_at_least( size_type n ) {
815 size_type m=0;
816 if( n ) {
817 m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
818 if( m>n ) m=n;
819 }
820 return iterator(*this, m);
821 };
822
823 /** Analogous to grow_to_at_least( size_type n ) with exception that the new
824 elements are initialized by copying of t instead of default construction. */
grow_to_at_least(size_type n,const_reference t)825 iterator grow_to_at_least( size_type n, const_reference t ) {
826 size_type m=0;
827 if( n ) {
828 m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);
829 if( m>n ) m=n;
830 }
831 return iterator(*this, m);
832 };
833
    //! Push item
    /** Returns iterator pointing to the new element. Thread-safe.
        push_back_helper reserves a slot; if the copy constructor throws, the
        helper's guard marks the slot instead of leaking a half-built element. */
    iterator push_back( const_reference item )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(item);
        return prolog.return_iterator_and_dismiss();
    }
842
843 #if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Push item, move-aware
    /** Returns iterator pointing to the new element. Thread-safe.
        On success, item is left in a valid but unspecified moved-from state. */
    iterator push_back( T&& item )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::move(item));
        return prolog.return_iterator_and_dismiss();
    }
852 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Push item, create item "in place" with provided arguments
    /** Returns iterator pointing to the new element. Thread-safe.
        The element is constructed directly in its final slot via placement new. */
    template<typename... Args>
    iterator emplace_back( Args&&... args )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
        return prolog.return_iterator_and_dismiss();
    }
862 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
863 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
    //! Get reference to element at given index.
    /** This method is thread-safe for concurrent reads, and also while growing the vector,
        as long as the calling thread has checked that index < size().
        No bounds/validity checks in release builds; see at() for checked access. */
    reference operator[]( size_type index ) {
        return internal_subscript(index);
    }

    //! Get const reference to element at given index.
    /** Same thread-safety guarantees as the non-const overload. */
    const_reference operator[]( size_type index ) const {
        return internal_subscript(index);
    }
875
    //! Get reference to element at given index. Throws exceptions on errors.
    /** Throws std::out_of_range when index >= size(), and a range/index error
        when the backing segment is missing or broken by a failed allocation
        (see internal_subscript_with_exceptions). */
    reference at( size_type index ) {
        return internal_subscript_with_exceptions(index);
    }

    //! Get const reference to element at given index. Throws exceptions on errors.
    const_reference at( size_type index ) const {
        return internal_subscript_with_exceptions(index);
    }
885
    //! Get range for iterating with parallel algorithms
    /** grainsize is the splitting threshold passed to blocked-range machinery. */
    range_type range( size_t grainsize = 1 ) {
        return range_type( begin(), end(), grainsize );
    }

    //! Get const range for iterating with parallel algorithms
    const_range_type range( size_t grainsize = 1 ) const {
        return const_range_type( begin(), end(), grainsize );
    }
895
896 //------------------------------------------------------------------------
897 // Capacity
898 //------------------------------------------------------------------------
899 //! Return size of vector. It may include elements under construction
size()900 size_type size() const {
901 size_type sz = my_early_size, cp = internal_capacity();
902 return cp < sz ? cp : sz;
903 }
904
    //! Return false if vector is not empty or has elements under construction at least.
    /** O(1): checks only the published element count. */
    bool empty() const {return !my_early_size;}

    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
    size_type capacity() const {return internal_capacity();}
910
    //! Allocate enough space to grow to size n without having to allocate more memory later.
    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe.
        The capacity afterwards may be bigger than the requested reservation. */
    void reserve( size_type n ) {
        if( n ) // reserving zero is a no-op
            internal_reserve(n, sizeof(T), max_size());
    }
918
    //! Resize the vector. Not thread-safe.
    /** Grows with default-constructed elements or shrinks by destroying surplus ones. */
    void resize( size_type n ) {
        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
    }

    //! Resize the vector, copy t for new elements. Not thread-safe.
    void resize( size_type n, const_reference t ) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }
928
    //! Optimize memory usage and fragmentation.
    /** Not thread-safe. Defined out of line below: compacts elements into a
        single allocation and frees the now-unneeded segments. */
    void shrink_to_fit();

    //! Upper bound on argument to reserve.
    size_type max_size() const {return (~size_type(0))/sizeof(T);}
934
935 //------------------------------------------------------------------------
936 // STL support
937 //------------------------------------------------------------------------
938
939 //! start iterator
begin()940 iterator begin() {return iterator(*this,0);}
941 //! end iterator
end()942 iterator end() {return iterator(*this,size());}
943 //! start const iterator
begin()944 const_iterator begin() const {return const_iterator(*this,0);}
945 //! end const iterator
end()946 const_iterator end() const {return const_iterator(*this,size());}
947 //! start const iterator
cbegin()948 const_iterator cbegin() const {return const_iterator(*this,0);}
949 //! end const iterator
cend()950 const_iterator cend() const {return const_iterator(*this,size());}
951 //! reverse start iterator
rbegin()952 reverse_iterator rbegin() {return reverse_iterator(end());}
953 //! reverse end iterator
rend()954 reverse_iterator rend() {return reverse_iterator(begin());}
955 //! reverse start const iterator
rbegin()956 const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
957 //! reverse end const iterator
rend()958 const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
959 //! reverse start const iterator
crbegin()960 const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
961 //! reverse end const iterator
crend()962 const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
    //! the first item
    /** Undefined if the vector is empty. Element 0 always lives in segment 0,
        so the generic subscript path is bypassed and segment 0 is read directly. */
    reference front() {
        __TBB_ASSERT( size()>0, NULL);
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<T>())[0];
    }
    //! the first item const
    const_reference front() const {
        __TBB_ASSERT( size()>0, NULL);
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<const T>())[0];
    }
    //! the last item
    /** Undefined if the vector is empty. Uses the generic subscript path since
        the last element may live in any segment. */
    reference back() {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! the last item const
    const_reference back() const {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! assign n items by copying t item
    /** Not thread-safe: destroys all elements, then fills with n copies of t. */
    void assign(size_type n, const_reference t) {
        clear();
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }
993
    //! assign range [first, last)
    /** Not thread-safe. Integral I is dispatched to the (count, value) overload,
        mirroring the iterator-pair constructor (C++ Standard 2003 23.1.1p9). */
    template<class I>
    void assign(I first, I last) {
        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assigns an initializer list
    /** Not thread-safe. */
    void assign(std::initializer_list<T> init_list) {
        clear(); internal_assign_iterators( init_list.begin(), init_list.end());
    }
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
1006
    //! swap two instances
    /** Not thread-safe. Swaps the segment tables in O(1). When the allocators
        compare unequal and do not propagate on swap, the call does nothing. */
    void swap(concurrent_vector &vector) {
        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_swap pocs_t;
        if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
            concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
            tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
        }
    }
1015
    //! Clear container while keeping memory allocated.
    /** To free up the memory, use in conjunction with method compact(). Not thread safe **/
    void clear() {
        internal_clear(&destroy_array); // destroys elements; segment memory is retained
    }
1021
    //! Clear and destroy vector.
    ~concurrent_vector() {
        // Destroy all constructed elements (internal_clear returns the segment
        // count), then release every segment including the merged first block.
        segment_t *table = my_segment.load<relaxed>();
        internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
        // base class destructor call should be then
    }
1028
internal_vector_base()1029 const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
1030 private:
1031 //! Allocate k items
internal_allocator(internal::concurrent_vector_base_v3 & vb,size_t k)1032 static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
1033 return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
1034 }
1035 //! Free k segments from table
1036 void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
1037
1038 //! Get reference to element at given index.
1039 T& internal_subscript( size_type index ) const;
1040
1041 //! Get reference to element at given index with errors checks
1042 T& internal_subscript_with_exceptions( size_type index ) const;
1043
1044 //! assign n items by copying t
internal_assign_n(size_type n,const_pointer p)1045 void internal_assign_n(size_type n, const_pointer p) {
1046 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
1047 }
1048
    //! True/false function override helper
    /* Functions declarations:
     * void foo(is_integer_tag<true>*);
     * void foo(is_integer_tag<false>*);
     * Usage example:
     * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
     */
    template<bool B> class is_integer_tag;

    //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
        // first is the element count, last is the fill value
        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
    }
    //! inline proxy assign by iterators
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
        internal_assign_iterators(first, last);
    }
    //! assign by iterators (defined out of line below)
    template<class I>
    void internal_assign_iterators(I first, I last);
1071
    //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library

    //! Construct n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );

    //! Copy-construct n instances of T, starting at "begin", each from the single element pointed to by src.
    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );

    //! Copy-construct n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );

#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
    //! Either copy- or move-construct n instances of T, starting at "dst", by copying/moving the corresponding element of src array.
    static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move-construct n instances of T, starting at "dst" by copying according element of src array.
    static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );

    //! Move-assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array.
    static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
#endif
    //! Copy-construct n instances of T, starting at "dst" by iterator range of [p_type_erased_iterator, p_type_erased_iterator+n).
    template<typename Iterator>
    static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );

    //! Assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array.
    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );

    //! Destroy n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
1104
    //! Exception-aware helper class for filling a segment by exception-danger operators of user class
    /** Constructs or assigns elements [0, n) of `array`. If a T constructor throws
        mid-loop, the destructor hands the unconstructed tail [i, n) to
        handle_unconstructed_elements so the segment remains safely destroyable. */
    class internal_loop_guide : internal::no_copy {
    public:
        const pointer array;
        const size_type n;
        size_type i; // index of the first not-yet-processed element

        static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
        static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }

        internal_loop_guide(size_type ntrials, void *ptr)
            : array(as_pointer(ptr)), n(ntrials), i(0) {}
        //! Default-construct elements [i, n)
        void init() { for(; i < n; ++i) new( &array[i] ) T(); }
        //! Copy-construct every element from the single value *src
        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
        //! Copy-construct element-wise from the array at src
        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
        //! Copy-assign element-wise from the array at src (targets already constructed)
        void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
#if __TBB_CPP11_RVALUE_REF_PRESENT
        void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
        void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
#endif
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
        void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

        //TODO: rename to construct_range
        //! Copy-construct from an input iterator, advancing it in lockstep
        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
        ~internal_loop_guide() {
            if(i < n) {// if an exception was raised, fill the rest of items with zeros
                internal::handle_unconstructed_elements(array+i, n-i);
            }
        }
    };
1137
    //! RAII helper for push_back/emplace_back: reserves one slot and guards its construction.
    struct push_back_helper : internal::no_copy{
        //! Handles the reserved slot if the caller's placement-new throws
        struct element_construction_guard : internal::no_copy{
            pointer element;

            element_construction_guard(pointer an_element) : element (an_element){}
            //! Construction succeeded: disarm the guard
            void dismiss(){ element = NULL; }
            ~element_construction_guard(){
                if (element){
                    // construction threw: mark the reserved slot via the internal helper
                    internal::handle_unconstructed_elements(element, 1);
                }
            }
        };

        concurrent_vector & v; // vector being appended to
        size_type k;           // index of the reserved element, set by internal_push_back
        element_construction_guard g;

        push_back_helper(concurrent_vector & vector) :
            v(vector),
            g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
        {}

        //! Raw storage where the caller placement-constructs the new element
        pointer internal_push_back_result(){ return g.element;}
        //! Disarm the guard and build the iterator to the new element
        iterator return_iterator_and_dismiss(){
            pointer ptr = g.element;
            g.dismiss();
            return iterator(v, k, ptr);
        }
    };
1167 };
1168
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
// Deduction guide for the constructor from two iterators:
// the element type is taken from the iterator's value_type.
template<typename I,
         typename T = typename std::iterator_traits<I>::value_type,
         typename A = cache_aligned_allocator<T>
> concurrent_vector(I, I, const A& = A())
-> concurrent_vector<T, A>;

// Deduction guide for the constructor from a vector and allocator
// (note: the deduced vector uses the explicitly supplied allocator type A2)
template<typename T, typename A1, typename A2>
concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
-> concurrent_vector<T, A2>;

// Deduction guide for the constructor from an initializer_list
template<typename T, typename A = cache_aligned_allocator<T>
> concurrent_vector(std::initializer_list<T>, const A& = A())
-> concurrent_vector<T, A>;
#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
1187
1188 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1189 #pragma warning (push)
1190 #pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1191 #endif
1192 template<typename T, class A>
shrink_to_fit()1193 void concurrent_vector<T, A>::shrink_to_fit() {
1194 internal_segments_table old;
1195 __TBB_TRY {
1196 internal_array_op2 copy_or_move_array =
1197 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1198 &move_array_if_noexcept
1199 #else
1200 ©_array
1201 #endif
1202 ;
1203 if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1204 internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1205 } __TBB_CATCH(...) {
1206 if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
1207 internal_free_segments( old.table, 1, old.first_block );
1208 __TBB_RETHROW();
1209 }
1210 }
1211 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1212 #pragma warning (pop)
1213 #endif // warning 4701 is back
1214
//! Release segment memory for segments [0, k), given that [0, first_block) share one allocation.
template<typename T, class A>
void concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) {
    // Free the arrays
    // Segments [first_block, k) were allocated individually; free them in reverse.
    while( k > first_block ) {
        --k;
        segment_value_t segment_value = table[k].load<relaxed>();
        table[k].store<relaxed>(segment_not_used());
        if( segment_value == segment_allocated() ) // check for correct segment pointer
            this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
    }
    // Segments [0, first_block) are carved out of a single allocation of
    // segment_size(first_block) elements anchored at table[0]; free it once.
    segment_value_t segment_value = table[0].load<relaxed>();
    if( segment_value == segment_allocated() ) {
        __TBB_ASSERT( first_block > 0, NULL );
        while(k > 0) table[--k].store<relaxed>(segment_not_used());
        this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
    }
}
1232
//! Unchecked element access: maps index to (segment k, offset j) and dereferences.
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
    //TODO: unify both versions of internal_subscript
    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
    size_type j = index;
    // segment_base_index_of reduces j to the offset within segment k
    segment_index_t k = segment_base_index_of( j );
    __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
    //no need in load with acquire (load<acquire>) since thread works in own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make a load of my_segment relaxed as well ?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
    __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
    return (( segment_value.pointer<T>()))[j];
}
1249
//! Checked element access: same mapping as internal_subscript, but throws instead of asserting.
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
    if( index >= my_early_size )
        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
    size_type j = index;
    segment_index_t k = segment_base_index_of( j );
    //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
    if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
    // no need in load with acquire (load<acquire>) since thread works in own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make a load of my_segment relaxed as well ?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    // throws if the segment is not in the allocated state
    enforce_segment_allocated(segment_value, internal::eid_index_range_error);
    return (segment_value.pointer<T>())[j];
}
1267
//! Fill an empty vector by copy-constructing from [first, last), segment by segment.
template<typename T, class A> template<class I>
void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
    // Precondition: the vector holds no elements; all space is reserved up front.
    __TBB_ASSERT(my_early_size == 0, NULL);
    size_type n = std::distance(first, last);
    if( !n ) return;
    internal_reserve(n, sizeof(T), max_size());
    my_early_size = n;
    segment_index_t k = 0;
    //TODO: unify segment iteration code with concurrent_base_v3::helper
    size_type sz = segment_size( my_first_block );
    while( sz < n ) {
        // one internal_loop_guide per contiguous segment run
        internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
        loop.iterate(first);
        n -= sz;
        if( !k ) k = my_first_block; // segments [0, my_first_block) form one merged block
        else { ++k; sz <<= 1; } // subsequent segments double in size
    }
    internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
    loop.iterate(first);
}
1288
//! Default-construct n elements at begin (exception-guarded by internal_loop_guide).
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init();
}

//! Copy-construct n elements at begin, each from the single value *src.
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init(src);
}

//! Copy-construct n elements at dst from the corresponding elements of the src array.
template<typename T, class A>
void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.copy(src);
}
1303
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Move-construct n elements at dst from the corresponding elements of the src array.
template<typename T, class A>
void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct(src);
}
//! Move-assign n already-constructed elements at dst from the src array.
template<typename T, class A>
void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_assign(src);
}
#endif
1314
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
//! Move- or copy-construct n elements at dst (std::move_if_noexcept per element).
template<typename T, class A>
void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
}
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1321
//! Copy-construct n elements at dst from a type-erased iterator, advancing it in place.
template<typename T, class A>
template<typename I>
void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
    internal_loop_guide loop(n, dst);
    // the iterator object lives at the caller (grow_by); advance it by reference
    loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
}

//! Copy-assign n already-constructed elements at dst from the src array.
template<typename T, class A>
void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.assign(src);
}
1333
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warning
#pragma warning (push)
#pragma warning (disable: 4189)
#endif
//! Destroy n elements at begin in reverse order of construction.
template<typename T, class A>
void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
    T* array = static_cast<T*>(begin);
    for( size_type j=n; j>0; --j )
        array[j-1].~T(); // destructors are supposed to not throw any exceptions
}
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4189 is back
1348
1349 // concurrent_vector's template functions
1350 template<typename T, class A1, class A2>
1351 inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1352 //TODO: call size() only once per vector (in operator==)
1353 // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1354 if(a.size() != b.size()) return false;
1355 typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1356 typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1357 for(; i != a.end(); ++i, ++j)
1358 if( !(*i == *j) ) return false;
1359 return true;
1360 }
1361
// The relational operators below are all derived from operator== and
// lexicographical_compare in the canonical way; each traverses both vectors
// and is not safe against concurrent growth.
template<typename T, class A1, class A2>
inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{   return !(a == b); }

template<typename T, class A1, class A2>
inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{   return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }

template<typename T, class A1, class A2>
inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{   return b < a; }

template<typename T, class A1, class A2>
inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{   return !(b < a); }

template<typename T, class A1, class A2>
inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{   return !(a < b); }
1381
//! ADL swap: delegates to the O(1) member swap.
template<typename T, class A>
inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
{   a.swap( b ); }
1385
1386 } // namespace tbb
1387
1388 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1389 #pragma warning (pop)
1390 #endif // warning 4267,4127 are back
1391
1392
1393 #undef __TBB_concurrent_vector_H_include_area
1394 #include "internal/_warning_suppress_disable_notice.h"
1395
1396 #endif /* __TBB_concurrent_vector_H */
1397