// -*- C++ -*-
//===-- parallel_backend_tbb.h --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _PSTL_PARALLEL_BACKEND_TBB_H
#define _PSTL_PARALLEL_BACKEND_TBB_H

#include <algorithm>
#include <cstdint> // int32_t
#include <type_traits>

#include "parallel_backend_utils.h"

// Bring in minimal required subset of Intel TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_scan.h>
#include <tbb/parallel_invoke.h>
#include <tbb/task_arena.h>
#include <tbb/tbb_allocator.h>

#if TBB_INTERFACE_VERSION < 10000
#    error Intel(R) Threading Building Blocks 2018 is required; older versions are not supported.
#endif

namespace __pstl
{
namespace __par_backend
{

//! Raw memory buffer with automatic freeing and no exceptions.
/** Some of our algorithms need to start with a raw memory buffer,
    not an initialized array, because initialization/destruction
    would make the span be at least O(N). */
// tbb::allocator can improve performance in some cases.
template <typename _Tp>
class __buffer
{
    tbb::tbb_allocator<_Tp> _M_allocator;
    _Tp* _M_ptr;
    const std::size_t _M_buf_size;
    __buffer(const __buffer&) = delete;
    void
    operator=(const __buffer&) = delete;

  public:
    //! Try to obtain a buffer of given size to store objects of _Tp type
    __buffer(std::size_t __n) : _M_allocator(), _M_ptr(_M_allocator.allocate(__n)), _M_buf_size(__n) {}
    //! True if the buffer was successfully obtained, false otherwise.
    operator bool() const { return _M_ptr != NULL; }
    //! Return pointer to buffer, or NULL if buffer could not be obtained.
    _Tp*
    get() const
    {
        return _M_ptr;
    }
    //! Destroy buffer
    ~__buffer() { _M_allocator.deallocate(_M_ptr, _M_buf_size); }
};
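
// Illustrative usage sketch (not part of the library): obtain raw storage for
// __n objects (all names here are hypothetical), construct elements lazily with
// placement new, and destroy only what was constructed; the raw memory itself
// is freed automatically on scope exit.
//
//     __par_backend::__buffer<std::string> __buf(__n);
//     if (__buf)
//     {
//         std::string* __p = __buf.get();
//         new (__p) std::string("x"); // caller tracks constructed elements...
//         std::destroy_at(__p);       // ...and destroys them before deallocation
//     }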

// Wrapper for tbb::task: cancel execution of the current task group.
inline void
__cancel_execution()
{
    tbb::task::self().group()->cancel_group_execution();
}

//------------------------------------------------------------------------
// parallel_for
//------------------------------------------------------------------------

template <class _Index, class _RealBody>
class __parallel_for_body
{
  public:
    __parallel_for_body(const _RealBody& __body) : _M_body(__body) {}
    __parallel_for_body(const __parallel_for_body& __body) : _M_body(__body._M_body) {}
    void
    operator()(const tbb::blocked_range<_Index>& __range) const
    {
        _M_body(__range.begin(), __range.end());
    }

  private:
    _RealBody _M_body;
};

//! Evaluation of brick f[i,j) for each subrange [i,j) of [first,last)
// wrapper over tbb::parallel_for
template <class _ExecutionPolicy, class _Index, class _Fp>
void
__parallel_for(_ExecutionPolicy&&, _Index __first, _Index __last, _Fp __f)
{
    tbb::this_task_arena::isolate([=]() {
        tbb::parallel_for(tbb::blocked_range<_Index>(__first, __last), __parallel_for_body<_Index, _Fp>(__f));
    });
}
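
// Illustrative usage sketch (not part of the library): zero a hypothetical array
// __a of length __n. The brick receives half-open subranges [i,j); the policy
// argument is ignored by this backend, so any policy tag object works.
//
//     __par_backend::__parallel_for(std::execution::par, std::size_t(0), __n,
//                                   [&](std::size_t __i, std::size_t __j) {
//                                       for (; __i != __j; ++__i)
//                                           __a[__i] = 0;
//                                   });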

//! Evaluation of brick f(i,j,init) for each subrange [i,j) of [first,last), with
//! partial results combined by __reduction
// wrapper over tbb::parallel_reduce
template <class _ExecutionPolicy, class _Value, class _Index, typename _RealBody, typename _Reduction>
_Value
__parallel_reduce(_ExecutionPolicy&&, _Index __first, _Index __last, const _Value& __identity,
                  const _RealBody& __real_body, const _Reduction& __reduction)
{
    return tbb::this_task_arena::isolate([__first, __last, &__identity, &__real_body, &__reduction]() -> _Value {
        return tbb::parallel_reduce(
            tbb::blocked_range<_Index>(__first, __last), __identity,
            [__real_body](const tbb::blocked_range<_Index>& __r, const _Value& __value) -> _Value {
                return __real_body(__r.begin(), __r.end(), __value);
            },
            __reduction);
    });
}
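
// Illustrative usage sketch (not part of the library): sum a hypothetical array
// __a of length __n. The brick folds one subrange into a partial value starting
// from __init; the reduction combines partial values.
//
//     int __sum = __par_backend::__parallel_reduce(
//         std::execution::par, std::size_t(0), __n, 0,
//         [&](std::size_t __i, std::size_t __j, int __init) {
//             for (; __i != __j; ++__i)
//                 __init += __a[__i];
//             return __init;
//         },
//         std::plus<int>());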

//------------------------------------------------------------------------
// parallel_transform_reduce
//
// Notation:
//      r(i,j,init) returns reduction of init with reduction over [i,j)
//      u(i) returns f(i,i+1,identity) for a hypothetical left identity element of r
//      c(x,y) combines values x and y that were the result of r or u
//------------------------------------------------------------------------

template <class _Index, class _Up, class _Tp, class _Cp, class _Rp>
struct __par_trans_red_body
{
    alignas(_Tp) char _M_sum_storage[sizeof(_Tp)]; // Holds generalized non-commutative sum when has_sum==true
    _Rp _M_brick_reduce;                           // Most likely to have non-empty layout
    _Up _M_u;
    _Cp _M_combine;
    bool _M_has_sum; // Put last to minimize size of class
    _Tp&
    sum()
    {
        _PSTL_ASSERT_MSG(_M_has_sum, "sum expected");
        return *(_Tp*)_M_sum_storage;
    }
    __par_trans_red_body(_Up __u, _Tp __init, _Cp __c, _Rp __r)
        : _M_brick_reduce(__r), _M_u(__u), _M_combine(__c), _M_has_sum(true)
    {
        new (_M_sum_storage) _Tp(__init);
    }

    __par_trans_red_body(__par_trans_red_body& __left, tbb::split)
        : _M_brick_reduce(__left._M_brick_reduce), _M_u(__left._M_u), _M_combine(__left._M_combine), _M_has_sum(false)
    {
    }

    ~__par_trans_red_body()
    {
        // 17.6.5.12 tells us to not worry about catching exceptions from destructors.
        if (_M_has_sum)
            sum().~_Tp();
    }

    void
    join(__par_trans_red_body& __rhs)
    {
        sum() = _M_combine(sum(), __rhs.sum());
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range)
    {
        _Index __i = __range.begin();
        _Index __j = __range.end();
        if (!_M_has_sum)
        {
            _PSTL_ASSERT_MSG(__range.size() > 1, "there should be at least 2 elements");
            new (&_M_sum_storage)
                _Tp(_M_combine(_M_u(__i), _M_u(__i + 1))); // The condition i+1 < j is provided by the grain size of 3
            _M_has_sum = true;
            std::advance(__i, 2);
            if (__i == __j)
                return;
        }
        sum() = _M_brick_reduce(__i, __j, sum());
    }
};

template <class _ExecutionPolicy, class _Index, class _Up, class _Tp, class _Cp, class _Rp>
_Tp
__parallel_transform_reduce(_ExecutionPolicy&&, _Index __first, _Index __last, _Up __u, _Tp __init, _Cp __combine,
                            _Rp __brick_reduce)
{
    __par_backend::__par_trans_red_body<_Index, _Up, _Tp, _Cp, _Rp> __body(__u, __init, __combine, __brick_reduce);
    // The grain size of 3 is used in order to provide a minimum of 2 elements for each body
    tbb::this_task_arena::isolate(
        [__first, __last, &__body]() { tbb::parallel_reduce(tbb::blocked_range<_Index>(__first, __last, 3), __body); });
    return __body.sum();
}
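
// Illustrative usage sketch (not part of the library): sum of squares of a
// hypothetical array __a of length __n, in the r/u/c notation above: u squares
// one element, r folds a subrange into a running value, c combines two sums.
//
//     int __result = __par_backend::__parallel_transform_reduce(
//         std::execution::par, std::size_t(0), __n,
//         [&](std::size_t __i) { return __a[__i] * __a[__i]; }, // u
//         0,                                                    // init
//         std::plus<int>(),                                     // c
//         [&](std::size_t __i, std::size_t __j, int __init) {   // r
//             for (; __i != __j; ++__i)
//                 __init += __a[__i] * __a[__i];
//             return __init;
//         });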

//------------------------------------------------------------------------
// parallel_scan
//------------------------------------------------------------------------

template <class _Index, class _Up, class _Tp, class _Cp, class _Rp, class _Sp>
class __trans_scan_body
{
    alignas(_Tp) char _M_sum_storage[sizeof(_Tp)]; // Holds generalized non-commutative sum when has_sum==true
    _Rp _M_brick_reduce;                           // Most likely to have non-empty layout
    _Up _M_u;
    _Cp _M_combine;
    _Sp _M_scan;
    bool _M_has_sum; // Put last to minimize size of class
  public:
    __trans_scan_body(_Up __u, _Tp __init, _Cp __combine, _Rp __reduce, _Sp __scan)
        : _M_brick_reduce(__reduce), _M_u(__u), _M_combine(__combine), _M_scan(__scan), _M_has_sum(true)
    {
        new (_M_sum_storage) _Tp(__init);
    }

    __trans_scan_body(__trans_scan_body& __b, tbb::split)
        : _M_brick_reduce(__b._M_brick_reduce), _M_u(__b._M_u), _M_combine(__b._M_combine), _M_scan(__b._M_scan),
          _M_has_sum(false)
    {
    }

    ~__trans_scan_body()
    {
        // 17.6.5.12 tells us to not worry about catching exceptions from destructors.
        if (_M_has_sum)
            sum().~_Tp();
    }

    _Tp&
    sum() const
    {
        _PSTL_ASSERT_MSG(_M_has_sum, "sum expected");
        return *const_cast<_Tp*>(reinterpret_cast<_Tp const*>(_M_sum_storage));
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range, tbb::pre_scan_tag)
    {
        _Index __i = __range.begin();
        _Index __j = __range.end();
        if (!_M_has_sum)
        {
            new (&_M_sum_storage) _Tp(_M_u(__i));
            _M_has_sum = true;
            ++__i;
            if (__i == __j)
                return;
        }
        sum() = _M_brick_reduce(__i, __j, sum());
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range, tbb::final_scan_tag)
    {
        sum() = _M_scan(__range.begin(), __range.end(), sum());
    }

    void
    reverse_join(__trans_scan_body& __a)
    {
        if (_M_has_sum)
        {
            sum() = _M_combine(__a.sum(), sum());
        }
        else
        {
            new (&_M_sum_storage) _Tp(__a.sum());
            _M_has_sum = true;
        }
    }

    void
    assign(__trans_scan_body& __b)
    {
        sum() = __b.sum();
    }
};

//! Return the largest power of two strictly less than __m (callers guarantee __m > 1).
//! Used by __upsweep/__downsweep to split a range into a power-of-two left part and the rest.
template <typename _Index>
_Index
__split(_Index __m)
{
    _Index __k = 1;
    while (2 * __k < __m)
        __k *= 2;
    return __k;
}

//------------------------------------------------------------------------
// __parallel_strict_scan
//------------------------------------------------------------------------

template <typename _Index, typename _Tp, typename _Rp, typename _Cp>
void
__upsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsize, _Rp __reduce, _Cp __combine)
{
    if (__m == 1)
        __r[0] = __reduce(__i * __tilesize, __lastsize);
    else
    {
        _Index __k = __split(__m);
        tbb::parallel_invoke(
            [=] { __par_backend::__upsweep(__i, __k, __tilesize, __r, __tilesize, __reduce, __combine); },
            [=] {
                __par_backend::__upsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize, __reduce, __combine);
            });
        if (__m == 2 * __k)
            __r[__m - 1] = __combine(__r[__k - 1], __r[__m - 1]);
    }
}

template <typename _Index, typename _Tp, typename _Cp, typename _Sp>
void
__downsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsize, _Tp __initial, _Cp __combine,
            _Sp __scan)
{
    if (__m == 1)
        __scan(__i * __tilesize, __lastsize, __initial);
    else
    {
        const _Index __k = __split(__m);
        tbb::parallel_invoke(
            [=] { __par_backend::__downsweep(__i, __k, __tilesize, __r, __tilesize, __initial, __combine, __scan); },
            // Assumes that __combine never throws.
            //TODO: Consider adding a requirement for user functors to be constant.
            [=, &__combine] {
                __par_backend::__downsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize,
                                           __combine(__initial, __r[__k - 1]), __combine, __scan);
            });
    }
}

// Adapted from Intel(R) Cilk(TM) version from cilkpub.
// Let i:len denote a counted interval of length len starting at i.  s denotes a generalized-sum value.
// Expected actions of the functors are:
//     reduce(i,len) -> s  -- return reduction value of i:len.
//     combine(s1,s2) -> s -- return merged sum
//     apex(s) -- do any processing necessary between reduce and scan.
//     scan(i,len,initial) -- perform scan over i:len starting with initial.
// The initial range 0:n is partitioned into consecutive subranges.
// reduce and scan are each called exactly once per subrange.
// Thus callers can rely upon side effects in reduce.
// combine must not throw an exception.
// apex is called exactly once, after all calls to reduce and before all calls to scan.
// For example, it's useful for allocating a __buffer used by scan but whose size is the sum of all reduction values.
// T must have a trivial constructor and destructor.
template <class _ExecutionPolicy, typename _Index, typename _Tp, typename _Rp, typename _Cp, typename _Sp, typename _Ap>
void
__parallel_strict_scan(_ExecutionPolicy&&, _Index __n, _Tp __initial, _Rp __reduce, _Cp __combine, _Sp __scan,
                       _Ap __apex)
{
    tbb::this_task_arena::isolate([=, &__combine]() {
        if (__n > 1)
        {
            _Index __p = tbb::this_task_arena::max_concurrency();
            const _Index __slack = 4;
            _Index __tilesize = (__n - 1) / (__slack * __p) + 1;
            _Index __m = (__n - 1) / __tilesize;
            __buffer<_Tp> __buf(__m + 1);
            _Tp* __r = __buf.get();
            __par_backend::__upsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __reduce,
                                     __combine);

            // When __apex is a no-op and __combine has no side effects, a good optimizer
            // should be able to eliminate all code between here and __apex.
            // Alternatively, provide a default value for __apex that can be
            // recognized by metaprogramming that conditionally executes the following.
            size_t __k = __m + 1;
            _Tp __t = __r[__k - 1];
            while ((__k &= __k - 1))
                __t = __combine(__r[__k - 1], __t);
            __apex(__combine(__initial, __t));
            __par_backend::__downsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __initial,
                                       __combine, __scan);
            return;
        }
        // Fewer than 2 elements in sequence, or out of memory. Handle as a single block.
        _Tp __sum = __initial;
        if (__n)
            __sum = __combine(__sum, __reduce(_Index(0), __n));
        __apex(__sum);
        if (__n)
            __scan(_Index(0), __n, __initial);
    });
}
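
// Illustrative usage sketch (not part of the library): exclusive prefix sum of a
// hypothetical array __in into __out, both of length __n. reduce and scan receive
// (offset, length) pairs in the i:len notation above; apex sees the grand total
// and is a no-op here.
//
//     __par_backend::__parallel_strict_scan(
//         std::execution::par, __n, 0,
//         [&](std::size_t __i, std::size_t __len) { // reduce
//             int __s = 0;
//             for (std::size_t __j = 0; __j != __len; ++__j)
//                 __s += __in[__i + __j];
//             return __s;
//         },
//         std::plus<int>(), // combine
//         [&](std::size_t __i, std::size_t __len, int __init) { // scan
//             for (std::size_t __j = 0; __j != __len; ++__j)
//             {
//                 __out[__i + __j] = __init;
//                 __init += __in[__i + __j];
//             }
//         },
//         [](int) {}); // apex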

template <class _ExecutionPolicy, class _Index, class _Up, class _Tp, class _Cp, class _Rp, class _Sp>
_Tp
__parallel_transform_scan(_ExecutionPolicy&&, _Index __n, _Up __u, _Tp __init, _Cp __combine, _Rp __brick_reduce,
                          _Sp __scan)
{
    __trans_scan_body<_Index, _Up, _Tp, _Cp, _Rp, _Sp> __body(__u, __init, __combine, __brick_reduce, __scan);
    auto __range = tbb::blocked_range<_Index>(0, __n);
    tbb::this_task_arena::isolate([__range, &__body]() { tbb::parallel_scan(__range, __body); });
    return __body.sum();
}
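
// Illustrative usage sketch (not part of the library): inclusive prefix sum of a
// hypothetical array __a into __out; the final sum is returned. __u lifts one
// element, the reduce brick folds a subrange, and the scan brick must return the
// updated running sum.
//
//     int __total = __par_backend::__parallel_transform_scan(
//         std::execution::par, __n,
//         [&](std::size_t __i) { return __a[__i]; },          // u
//         0, std::plus<int>(),                                // init, combine
//         [&](std::size_t __i, std::size_t __j, int __init) { // reduce brick
//             for (; __i != __j; ++__i)
//                 __init += __a[__i];
//             return __init;
//         },
//         [&](std::size_t __i, std::size_t __j, int __init) { // scan brick
//             for (; __i != __j; ++__i)
//             {
//                 __init += __a[__i];
//                 __out[__i] = __init;
//             }
//             return __init;
//         });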

//------------------------------------------------------------------------
// parallel_stable_sort
//------------------------------------------------------------------------

//------------------------------------------------------------------------
// stable_sort utilities
//
// These are used by parallel implementations but do not depend on them.
//------------------------------------------------------------------------

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _RandomAccessIterator3,
          typename _Compare, typename _Cleanup, typename _LeafMerge>
class __merge_task : public tbb::task
{
    /*override*/ tbb::task*
    execute();
    _RandomAccessIterator1 _M_xs, _M_xe;
    _RandomAccessIterator2 _M_ys, _M_ye;
    _RandomAccessIterator3 _M_zs;
    _Compare _M_comp;
    _Cleanup _M_cleanup;
    _LeafMerge _M_leaf_merge;

  public:
    __merge_task(_RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe, _RandomAccessIterator2 __ys,
                 _RandomAccessIterator2 __ye, _RandomAccessIterator3 __zs, _Compare __comp, _Cleanup __cleanup,
                 _LeafMerge __leaf_merge)
        : _M_xs(__xs), _M_xe(__xe), _M_ys(__ys), _M_ye(__ye), _M_zs(__zs), _M_comp(__comp), _M_cleanup(__cleanup),
          _M_leaf_merge(__leaf_merge)
    {
    }
};

#define _PSTL_MERGE_CUT_OFF 2000

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _RandomAccessIterator3,
          typename __M_Compare, typename _Cleanup, typename _LeafMerge>
tbb::task*
__merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3, __M_Compare, _Cleanup,
             _LeafMerge>::execute()
{
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;
    const _SizeType __n = (_M_xe - _M_xs) + (_M_ye - _M_ys);
    const _SizeType __merge_cut_off = _PSTL_MERGE_CUT_OFF;
    if (__n <= __merge_cut_off)
    {
        _M_leaf_merge(_M_xs, _M_xe, _M_ys, _M_ye, _M_zs, _M_comp);

        // we clean the buffer one time on the last step of the sort
        _M_cleanup(_M_xs, _M_xe);
        _M_cleanup(_M_ys, _M_ye);
        return nullptr;
    }
    else
    {
        // Bisect the longer sequence at its midpoint and binary-search the matching
        // split point in the shorter one, so the two halves can be merged in parallel.
        _RandomAccessIterator1 __xm;
        _RandomAccessIterator2 __ym;
        if (_M_xe - _M_xs < _M_ye - _M_ys)
        {
            __ym = _M_ys + (_M_ye - _M_ys) / 2;
            __xm = std::upper_bound(_M_xs, _M_xe, *__ym, _M_comp);
        }
        else
        {
            __xm = _M_xs + (_M_xe - _M_xs) / 2;
            __ym = std::lower_bound(_M_ys, _M_ye, *__xm, _M_comp);
        }
        const _RandomAccessIterator3 __zm = _M_zs + ((__xm - _M_xs) + (__ym - _M_ys));
        // Spawn the right half as a new sibling task; this task recycles itself
        // as the continuation and re-executes on the narrowed left half.
        tbb::task* __right = new (tbb::task::allocate_additional_child_of(*parent()))
            __merge_task(__xm, _M_xe, __ym, _M_ye, __zm, _M_comp, _M_cleanup, _M_leaf_merge);
        tbb::task::spawn(*__right);
        tbb::task::recycle_as_continuation();
        _M_xe = __xm;
        _M_ye = __ym;
    }
    return this;
}

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _Compare, typename _LeafSort>
class __stable_sort_task : public tbb::task
{
  public:
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;

  private:
    /*override*/ tbb::task*
    execute();
    _RandomAccessIterator1 _M_xs, _M_xe;
    _RandomAccessIterator2 _M_zs;
    _Compare _M_comp;
    _LeafSort _M_leaf_sort;
    int32_t _M_inplace;
    _SizeType _M_nsort;

  public:
    __stable_sort_task(_RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe, _RandomAccessIterator2 __zs,
                       int32_t __inplace, _Compare __comp, _LeafSort __leaf_sort, _SizeType __n)
        : _M_xs(__xs), _M_xe(__xe), _M_zs(__zs), _M_comp(__comp), _M_leaf_sort(__leaf_sort), _M_inplace(__inplace),
          _M_nsort(__n)
    {
    }
};

//! Binary operator that does nothing
struct __binary_no_op
{
    template <typename _Tp>
    void
    operator()(_Tp, _Tp)
    {
    }
};

#define _PSTL_STABLE_SORT_CUT_OFF 500

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _Compare, typename _LeafSort>
tbb::task*
__stable_sort_task<_RandomAccessIterator1, _RandomAccessIterator2, _Compare, _LeafSort>::execute()
{
    const _SizeType __n = _M_xe - _M_xs;
    const _SizeType __nmerge = _M_nsort > 0 ? _M_nsort : __n;
    const _SizeType __sort_cut_off = _PSTL_STABLE_SORT_CUT_OFF;
    if (__n <= __sort_cut_off)
    {
        _M_leaf_sort(_M_xs, _M_xe, _M_comp);
        if (_M_inplace != 2)
            __par_backend::__init_buf(_M_xs, _M_xe, _M_zs, _M_inplace == 0);
        return nullptr;
    }
    else
    {
        const _RandomAccessIterator1 __xm = _M_xs + __n / 2;
        const _RandomAccessIterator2 __zm = _M_zs + (__xm - _M_xs);
        const _RandomAccessIterator2 __ze = _M_zs + __n;
        task* __m;
        auto __move_values = [](_RandomAccessIterator2 __x, _RandomAccessIterator1 __z) { *__z = std::move(*__x); };
        auto __move_sequences = [](_RandomAccessIterator2 __first1, _RandomAccessIterator2 __last1,
                                   _RandomAccessIterator1 __first2) { return std::move(__first1, __last1, __first2); };
        if (_M_inplace == 2)
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator2, _RandomAccessIterator2, _RandomAccessIterator1, _Compare,
                             __serial_destroy,
                             __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_zs, __zm, __zm, __ze, _M_xs, _M_comp, __serial_destroy(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(
                        __nmerge, __move_values, __move_sequences));
        else if (_M_inplace)
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator2, _RandomAccessIterator2, _RandomAccessIterator1, _Compare,
                             __par_backend::__binary_no_op,
                             __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_zs, __zm, __zm, __ze, _M_xs, _M_comp, __par_backend::__binary_no_op(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(
                        __nmerge, __move_values, __move_sequences));
        else
        {
            auto __move_values = [](_RandomAccessIterator1 __x, _RandomAccessIterator2 __z) { *__z = std::move(*__x); };
            auto __move_sequences = [](_RandomAccessIterator1 __first1, _RandomAccessIterator1 __last1,
                                       _RandomAccessIterator2 __first2) {
                return std::move(__first1, __last1, __first2);
            };
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator1, _RandomAccessIterator1, _RandomAccessIterator2, _Compare,
                             __par_backend::__binary_no_op,
                             __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_xs, __xm, __xm, _M_xe, _M_zs, _M_comp, __par_backend::__binary_no_op(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(
                        __nmerge, __move_values, __move_sequences));
        }
        __m->set_ref_count(2);
        task* __right = new (__m->allocate_child())
            __stable_sort_task(__xm, _M_xe, __zm, !_M_inplace, _M_comp, _M_leaf_sort, __nmerge);
        tbb::task::spawn(*__right);
        tbb::task::recycle_as_child_of(*__m);
        _M_xe = __xm;
        _M_inplace = !_M_inplace;
    }
    return this;
}

template <class _ExecutionPolicy, typename _RandomAccessIterator, typename _Compare, typename _LeafSort>
void
__parallel_stable_sort(_ExecutionPolicy&&, _RandomAccessIterator __xs, _RandomAccessIterator __xe, _Compare __comp,
                       _LeafSort __leaf_sort, std::size_t __nsort = 0)
{
    tbb::this_task_arena::isolate([=, &__nsort]() {
        // sorting based on task tree and parallel merge
        typedef typename std::iterator_traits<_RandomAccessIterator>::value_type _ValueType;
        typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type _DifferenceType;
        const _DifferenceType __n = __xe - __xs;
        if (__nsort == 0)
            __nsort = __n;

        const _DifferenceType __sort_cut_off = _PSTL_STABLE_SORT_CUT_OFF;
        if (__n > __sort_cut_off)
        {
            _PSTL_ASSERT(__nsort > 0 && __nsort <= __n);
            __buffer<_ValueType> __buf(__n);
            using tbb::task;
            task::spawn_root_and_wait(*new (task::allocate_root())
                                          __stable_sort_task<_RandomAccessIterator, _ValueType*, _Compare, _LeafSort>(
                                              __xs, __xe, (_ValueType*)__buf.get(), 2, __comp, __leaf_sort, __nsort));
            return;
        }
        // serial sort
        __leaf_sort(__xs, __xe, __comp);
    });
}
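
// Illustrative usage sketch (not part of the library): stable sort of a
// hypothetical std::vector<int> __v, using std::stable_sort as the leaf sort
// for subranges below the cut-off.
//
//     __par_backend::__parallel_stable_sort(
//         std::execution::par, __v.begin(), __v.end(), std::less<int>(),
//         [](auto __f, auto __l, auto __cmp) { std::stable_sort(__f, __l, __cmp); });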

//------------------------------------------------------------------------
// parallel_merge
//------------------------------------------------------------------------

template <class _ExecutionPolicy, typename _RandomAccessIterator1, typename _RandomAccessIterator2,
          typename _RandomAccessIterator3, typename _Compare, typename _LeafMerge>
void
__parallel_merge(_ExecutionPolicy&&, _RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe,
                 _RandomAccessIterator2 __ys, _RandomAccessIterator2 __ye, _RandomAccessIterator3 __zs, _Compare __comp,
                 _LeafMerge __leaf_merge)
{
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;
    const _SizeType __n = (__xe - __xs) + (__ye - __ys);
    const _SizeType __merge_cut_off = _PSTL_MERGE_CUT_OFF;
    if (__n <= __merge_cut_off)
    {
        // Fall back on serial merge
        __leaf_merge(__xs, __xe, __ys, __ye, __zs, __comp);
    }
    else
    {
        tbb::this_task_arena::isolate([=]() {
            typedef __merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3, _Compare,
                                 __par_backend::__binary_no_op, _LeafMerge>
                _TaskType;
            tbb::task::spawn_root_and_wait(*new (tbb::task::allocate_root()) _TaskType(
                __xs, __xe, __ys, __ye, __zs, __comp, __par_backend::__binary_no_op(), __leaf_merge));
        });
    }
}
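
// Illustrative usage sketch (not part of the library): merge two sorted
// hypothetical vectors __x and __y into __z (pre-sized to hold both), with
// std::merge as the leaf merge for small inputs.
//
//     __par_backend::__parallel_merge(
//         std::execution::par, __x.begin(), __x.end(), __y.begin(), __y.end(),
//         __z.begin(), std::less<int>(),
//         [](auto __xs, auto __xe, auto __ys, auto __ye, auto __zs, auto __cmp) {
//             std::merge(__xs, __xe, __ys, __ye, __zs, __cmp);
//         });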

//------------------------------------------------------------------------
// parallel_invoke
//------------------------------------------------------------------------
template <class _ExecutionPolicy, typename _F1, typename _F2>
void
__parallel_invoke(_ExecutionPolicy&&, _F1&& __f1, _F2&& __f2)
{
    //TODO: a version of tbb::this_task_arena::isolate with a variadic argument pack should be added in the future
    tbb::this_task_arena::isolate([&]() { tbb::parallel_invoke(std::forward<_F1>(__f1), std::forward<_F2>(__f2)); });
}
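
// Illustrative usage sketch (not part of the library): run two independent
// hypothetical work items concurrently inside an isolated arena.
//
//     __par_backend::__parallel_invoke(std::execution::par,
//                                      [&] { __process_left_half(); },
//                                      [&] { __process_right_half(); });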

} // namespace __par_backend
} // namespace __pstl

#endif /* _PSTL_PARALLEL_BACKEND_TBB_H */