1 /*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 */
16
17 #ifndef __TBB_task_group_H
18 #define __TBB_task_group_H
19
20 #define __TBB_task_group_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22
23 #include "task.h"
24 #include "tbb_exception.h"
25 #include "internal/_template_helpers.h"
26 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
27 #include "task_arena.h"
28 #endif
29
30 #if __TBB_TASK_GROUP_CONTEXT
31
32 namespace tbb {
33
34 namespace internal {
35 template<typename F> class task_handle_task;
36 }
37
38 class task_group;
39 class structured_task_group;
40 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
41 class isolated_task_group;
42 #endif
43
//! A functor wrapper that may be scheduled into a task_group at most once.
/** Friends (task_group and relatives) invoke mark_scheduled() when the handle
    is submitted; a second submission throws eid_invalid_multiple_scheduling. **/
template<typename F>
class task_handle : internal::no_assign {
    template<typename _F> friend class internal::task_handle_task;
    friend class task_group;
    friend class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
    friend class isolated_task_group;
#endif

    //! Bit flag recorded in my_state once the handle has been scheduled.
    static const intptr_t scheduled = 0x1;

    F my_func;
    intptr_t my_state;

    //! Marks the handle scheduled; throws if it was already scheduled.
    void mark_scheduled () {
        // The check here is intentionally lax to avoid the impact of interlocked operation
        if ( my_state & scheduled )
            internal::throw_exception( internal::eid_invalid_multiple_scheduling );
        my_state |= scheduled;
    }
public:
    task_handle( const F& f ) : my_func(f), my_state(0) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
    task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {}
#endif

    //! Invokes the wrapped functor.
    void operator() () const { my_func(); }
};
72
//! Status returned by the wait functions of task groups.
enum task_group_status {
    not_complete, //!< Not cancelled and not all tasks in the group have completed.
    complete,     //!< All tasks in the group have completed.
    canceled      //!< The group's execution was cancelled before completion.
};
78
79 namespace internal {
80
//! Internal task that executes the functor held by a task_handle.
template<typename F>
class task_handle_task : public task {
    task_handle<F>& my_handle;
    //! Runs the handle's functor; returns NULL (no task bypass).
    task* execute() __TBB_override {
        my_handle();
        return NULL;
    }
public:
    //! Marks the handle scheduled at construction time;
    //! throws eid_invalid_multiple_scheduling if it was already scheduled.
    task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
};
91
//! Common implementation shared by task_group and structured_task_group.
/** Owns an empty root task whose reference count tracks outstanding work,
    plus a bound task_group_context used for cancellation and exception
    propagation. Tasks are spawned as additional children of the root. **/
class task_group_base : internal::no_copy {
    //! RAII helper: raises the root's reference count for the guard's lifetime
    //! so that concurrent waiters see the group as non-empty while a functor
    //! runs inline on the calling thread.
    class ref_count_guard : internal::no_copy {
        task& my_task;
    public:
        ref_count_guard(task& t) : my_task(t) {
            my_task.increment_ref_count();
        }
        ~ref_count_guard() {
            my_task.decrement_ref_count();
        }
    };
protected:
    empty_task* my_root;            //!< Root task; ref_count > 1 means work is pending.
    task_group_context my_context;  //!< Cancellation/exception context for the group.

    //! Runs f inline (unless already cancelled), captures any exception into
    //! the context, then waits for all tasks in the group.
    template<typename F>
    task_group_status internal_run_and_wait( F& f ) {
        __TBB_TRY {
            if ( !my_context.is_group_execution_cancelled() ) {
                // We need to increase the reference count of the root task to notify waiters that
                // this task group has some work in progress.
                ref_count_guard guard(*my_root);
                f();
            }
        } __TBB_CATCH( ... ) {
            my_context.register_pending_exception();
        }
        return wait();
    }

    //! Allocates a Task (child of the root) holding a forwarded copy of f.
    template<typename Task, typename F>
    task* prepare_task( __TBB_FORWARDING_REF(F) f ) {
        return new( task::allocate_additional_child_of(*my_root) ) Task( internal::forward<F>(f) );
    }

public:
    //! Creates the root task with an initial reference count of 1
    //! (the waiter's own reference).
    task_group_base( uintptr_t traits = 0 )
        : my_context(task_group_context::bound, task_group_context::default_traits | traits)
    {
        my_root = new( task::allocate_root(my_context) ) empty_task;
        my_root->set_ref_count(1);
    }

    //! May throw eid_missing_wait when the group is destroyed with work still
    //! pending and no stack unwinding is in progress.
    ~task_group_base() __TBB_NOEXCEPT(false) {
        if( my_root->ref_count() > 1 ) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt to do proper cleanup to avoid inevitable memory corruption
            // in case of missing wait (for the sake of better testability & debuggability)
            if ( !is_canceling() )
                cancel();
            __TBB_TRY {
                my_root->wait_for_all();
            } __TBB_CATCH (...) {
                // Destroy the root before rethrowing so the task is not leaked.
                task::destroy(*my_root);
                __TBB_RETHROW();
            }
            task::destroy(*my_root);
            // Only report the missing wait() when it is safe to throw
            // (i.e. no other exception is already propagating).
            if ( !stack_unwinding_in_progress )
                internal::throw_exception( internal::eid_missing_wait );
        }
        else {
            task::destroy(*my_root);
        }
    }

    //! Spawns a task that runs the handle's functor; the handle must outlive execution.
    template<typename F>
    void run( task_handle<F>& h ) {
        task::spawn( *prepare_task< internal::task_handle_task<F> >(h) );
    }

    //! Waits for all tasks in the group; returns canceled if the group was cancelled.
    task_group_status wait() {
        __TBB_TRY {
            my_root->wait_for_all();
        } __TBB_CATCH( ... ) {
            my_context.reset();
            __TBB_RETHROW();
        }
        if ( my_context.is_group_execution_cancelled() ) {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            my_context.reset();
            return canceled;
        }
        return complete;
    }

    //! Returns true if cancellation of this group has been requested.
    bool is_canceling() {
        return my_context.is_group_execution_cancelled();
    }

    //! Requests cancellation of all tasks in this group.
    void cancel() {
        my_context.cancel_group_execution();
    }
}; // class task_group_base
189
190 } // namespace internal
191
192 class task_group : public internal::task_group_base {
193 public:
task_group()194 task_group () : task_group_base( task_group_context::concurrent_wait ) {}
195
196 #if __SUNPRO_CC
197 template<typename F>
run(task_handle<F> & h)198 void run( task_handle<F>& h ) {
199 internal_run< internal::task_handle_task<F> >( h );
200 }
201 #else
202 using task_group_base::run;
203 #endif
204
205 #if __TBB_CPP11_RVALUE_REF_PRESENT
206 template<typename F>
run(F && f)207 void run( F&& f ) {
208 task::spawn( *prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f)) );
209 }
210 #else
211 template<typename F>
run(const F & f)212 void run(const F& f) {
213 task::spawn( *prepare_task< internal::function_task<F> >(f) );
214 }
215 #endif
216
217 template<typename F>
run_and_wait(const F & f)218 task_group_status run_and_wait( const F& f ) {
219 return internal_run_and_wait<const F>( f );
220 }
221
222 // TODO: add task_handle rvalues support
223 template<typename F>
run_and_wait(task_handle<F> & h)224 task_group_status run_and_wait( task_handle<F>& h ) {
225 h.mark_scheduled();
226 return internal_run_and_wait< task_handle<F> >( h );
227 }
228 }; // class task_group
229
//! Deprecated variant of task_group with a restricted usage model.
class __TBB_DEPRECATED structured_task_group : public internal::task_group_base {
public:
    // TODO: add task_handle rvalues support
    //! Runs the handle's functor inline, then waits for the whole group.
    /** Throws eid_invalid_multiple_scheduling if h was already scheduled. **/
    template<typename F>
    task_group_status run_and_wait ( task_handle<F>& h ) {
        h.mark_scheduled();
        return internal_run_and_wait< task_handle<F> >( h );
    }

    //! Waits for all tasks, then restores the root's reference count to 1
    //! so the group can be reused for another batch of work.
    task_group_status wait() {
        task_group_status res = task_group_base::wait();
        my_root->set_ref_count(1);
        return res;
    }
}; // class structured_task_group
245
246 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
247 namespace internal {
248 using interface7::internal::delegate_base;
249 using interface7::internal::isolate_within_arena;
250
//! Delegate that spawns a pre-allocated task; passed to isolate_within_arena
//! so the spawn happens inside the desired isolation region.
class spawn_delegate : public delegate_base {
    task* task_to_spawn;
    void operator()() const __TBB_override {
        task::spawn(*task_to_spawn);
    }
public:
    spawn_delegate(task* a_task) : task_to_spawn(a_task) {}
};
259
//! Delegate that waits on a task_group inside an isolation region and
//! reports the resulting status through an out-reference.
class wait_delegate : public delegate_base {
    void operator()() const __TBB_override {
        status = tg.wait();
    }
protected:
    task_group& tg;            //!< Group to wait on.
    task_group_status& status; //!< Receives the wait result.
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};
271
//! Delegate that performs run_and_wait(func) on a task_group inside an
//! isolation region, reporting the status through the inherited out-reference.
template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    void operator()() const __TBB_override {
        status = tg.run_and_wait( func );
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};
282 } // namespace internal
283
//! A task_group whose tasks execute within their own isolation region,
//! preventing them from interleaving with unrelated outer work.
class isolated_task_group : public task_group {
    //! Uses this object's address as the unique isolation tag.
    intptr_t this_isolation() {
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group () : task_group() {}

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Spawns a task running a copy (or move) of f inside this group's isolation.
    template<typename F>
    void run( F&& f ) {
        internal::spawn_delegate sd(
            prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f))
        );
        internal::isolate_within_arena( sd, this_isolation() );
    }
#else
    //! Spawns a task running a copy of f inside this group's isolation.
    template<typename F>
    void run(const F& f) {
        internal::spawn_delegate sd( prepare_task< internal::function_task<F> >(f) );
        internal::isolate_within_arena( sd, this_isolation() );
    }
#endif

    //! Runs f inline within the isolation region, then waits for the group.
    template<typename F>
    task_group_status run_and_wait( const F& f ) {
        task_group_status result = not_complete;
        internal::run_wait_delegate< const F > rwd( *this, f, result );
        internal::isolate_within_arena( rwd, this_isolation() );
        __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
        return result;
    }

    // TODO: add task_handle rvalues support
    //! Spawns a task running the handle's functor inside this group's isolation.
    template<typename F>
    void run( task_handle<F>& h ) {
        internal::spawn_delegate sd( prepare_task< internal::task_handle_task<F> >(h) );
        internal::isolate_within_arena( sd, this_isolation() );
    }

    //! Runs the handle's functor inline within the isolation region, then waits.
    template<typename F>
    task_group_status run_and_wait ( task_handle<F>& h ) {
        task_group_status result = not_complete;
        internal::run_wait_delegate< task_handle<F> > rwd( *this, h, result );
        internal::isolate_within_arena( rwd, this_isolation() );
        __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
        return result;
    }

    //! Waits for all tasks in the group from within the isolation region.
    task_group_status wait() {
        task_group_status result = not_complete;
        internal::wait_delegate wd( *this, result );
        internal::isolate_within_arena( wd, this_isolation() );
        __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
        return result;
    }
}; // class isolated_task_group
340 #endif // TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
341
//! Returns true if the task group executing the innermost task on this
//! thread has been cancelled.
inline
bool is_current_task_group_canceling() {
    return task::self().is_cancelled();
}
346
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Creates a task_handle owning a copy (or move) of functor f.
template<class F>
task_handle< typename internal::strip<F>::type > make_task( F&& f ) {
    return task_handle< typename internal::strip<F>::type >( std::forward<F>(f) );
}
#else
//! Creates a task_handle owning a copy of functor f.
template<class F>
task_handle<F> make_task( const F& f ) {
    return task_handle<F>( f );
}
#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
358
359 } // namespace tbb
360
361 #endif /* __TBB_TASK_GROUP_CONTEXT */
362
363 #include "internal/_warning_suppress_disable_notice.h"
364 #undef __TBB_task_group_H_include_area
365
366 #endif /* __TBB_task_group_H */
367