1 /*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 */
16
17 #include "tbb/parallel_do.h"
18 #include "tbb/task_scheduler_init.h"
19 #include "tbb/atomic.h"
20 #include "harness.h"
21 #include "harness_cpu.h"
22
23 #if defined(_MSC_VER) && defined(_Wp64)
24 // Workaround for overzealous compiler warnings in /Wp64 mode
25 #pragma warning (disable: 4267)
26 #endif /* _MSC_VER && _Wp64 */
27
28 #define N_DEPTHS 20
29
// Counts live value_t objects; checked at the end of TestMain to detect leaks.
static tbb::atomic<int> g_values_counter;
31
//! Work item type used throughout the parallel_do tests.
/** Every constructor (including copy and move) increments g_values_counter and
    the destructor decrements it, so a non-zero counter after scheduler shutdown
    indicates leaked work items. Copy assignment is intentionally declared
    private and left undefined. **/
class value_t {
    size_t x;
    value_t& operator= ( const value_t& );
public:
    value_t ( size_t xx ) : x(xx) { ++g_values_counter; }
    value_t ( const value_t& v ) : x(v.x) { ++g_values_counter; }
#if __TBB_CPP11_RVALUE_REF_PRESENT
    // A moved-to object is a new live object, so it counts too.
    value_t ( value_t&& v ) : x(v.x) { ++g_values_counter; }
#endif
    ~value_t () { --g_values_counter; }
    // volatile-qualified so bodies taking the item by (const) volatile reference compile.
    size_t value() const volatile { return x; }
};
44
45 #include "harness_iterator.h"
46
// Number of tasks the current test iteration should execute (computed serially in Run).
static size_t g_tasks_expected = 0;
// Number of tasks actually observed by the parallel_do bodies.
static tbb::atomic<size_t> g_tasks_observed;
49
//! Returns the total number of tasks executed for a work item of the given depth.
/** Implements the recurrence f(0) = 1, f(d) = d*f(d-1) + 1 iteratively: one
    task for the item itself plus 'depth' children of depth-1 (see do_work). **/
size_t FindNumOfTasks ( size_t max_depth ) {
    size_t count = 1;
    for ( size_t d = 1; d <= max_depth; ++d )
        count = d * count + 1;
    return count;
}
55
//! Simplest form of the parallel_do functor object.
class FakeTaskGeneratorBody {
public:
    //! The simplest form of the function call operator
    /** Takes the work item by value; it does not allow adding new tasks during
        its execution, so it accounts for the whole subtree of tasks that the
        feeding variant would have spawned for this depth. **/
    void operator() ( value_t depth ) const {
        g_tasks_observed += FindNumOfTasks(depth.value());
    }
};
65
//! Non-feeding task counter. Work item is passed by reference here.
class FakeTaskGeneratorBody_RefVersion {
public:
    void operator() ( value_t& depth ) const {
        g_tasks_observed += FindNumOfTasks(depth.value());
    }
};
73
//! Non-feeding task counter. Work item is passed by reference to const here.
class FakeTaskGeneratorBody_ConstRefVersion {
public:
    void operator() ( const value_t& depth ) const {
        g_tasks_observed += FindNumOfTasks(depth.value());
    }
};
81
//! Non-feeding task counter. Work item is passed by reference to volatile here.
/** The feeder parameter exists only to exercise the two-argument body signature;
    no items are ever added through it. **/
class FakeTaskGeneratorBody_VolatileRefVersion {
public:
    void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& ) const {
        g_tasks_observed += FindNumOfTasks(depth.value());
    }
};
89
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Non-feeding task counter. Work item is passed by rvalue reference here.
class FakeTaskGeneratorBody_RvalueRefVersion {
public:
    void operator() ( value_t&& depth ) const {
        g_tasks_observed += FindNumOfTasks(depth.value());
    }
};
#endif
99
do_work(const value_t & depth,tbb::parallel_do_feeder<value_t> & feeder)100 void do_work ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) {
101 ++g_tasks_observed;
102 value_t new_value(depth.value()-1);
103 for( size_t i = 0; i < depth.value(); ++i) {
104 if (i%2) feeder.add( new_value ); // pass lvalue
105 else feeder.add( value_t(depth.value()-1) ); // pass rvalue
106 }
107 }
108
//! Standard form of the parallel_do functor object.
/** Allows adding new work items on the fly via the feeder argument. **/
class TaskGeneratorBody
{
public:
    //! This form of the function call operator can be used when the body needs to add more work during the processing
    void operator() ( value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
private:
    // Assert that parallel_do does not ever access body constructors:
    // both are private, so any such access fails to compile.
    TaskGeneratorBody () {}
    TaskGeneratorBody ( const TaskGeneratorBody& );
    // These functions need access to the default constructor
    template<class Body, class Iterator> friend void TestBody( size_t );
    template<class Body, class Iterator> friend void TestBody_MoveOnly( size_t );
};
126
//! Feeding body variant. Work item is passed by reference here.
class TaskGeneratorBody_RefVersion
{
public:
    void operator() ( value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
};
135
//! Feeding body variant. Work item is passed as const here.
/** Compilers must ignore the top-level const qualifier on the by-value parameter. **/
class TaskGeneratorBody_ConstVersion
{
public:
    void operator() ( const value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
};
144
//! Feeding body variant. Work item is passed by reference to const here.
class TaskGeneratorBody_ConstRefVersion
{
public:
    void operator() ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
};
153
//! Feeding body variant. Work item is passed by reference to volatile here.
class TaskGeneratorBody_VolatileRefVersion
{
public:
    void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        // const_cast strips the volatile qualifier so do_work's signature matches.
        do_work(const_cast<value_t&>(depth), feeder);
    }
};
162
//! Feeding body variant. Work item is passed by reference to const volatile here.
class TaskGeneratorBody_ConstVolatileRefVersion
{
public:
    void operator() ( const volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        // const_cast strips the volatile qualifier so do_work's signature matches.
        do_work(const_cast<value_t&>(depth), feeder);
    }
};
171
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Feeding body variant. Work item is passed by rvalue reference here.
class TaskGeneratorBody_RvalueRefVersion
{
public:
    void operator() ( value_t&& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
};
#endif
182
// Depths of the initial work items fed to parallel_do; Run sums FindNumOfTasks
// over a prefix of this array to compute g_tasks_expected.
static value_t g_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
184
185 #if __TBB_CPP11_RVALUE_REF_PRESENT
186 template<class Body, class Iterator>
TestBody_MoveIter(const Body & body,Iterator begin,Iterator end)187 void TestBody_MoveIter ( const Body& body, Iterator begin, Iterator end ) {
188 typedef std::move_iterator<Iterator> MoveIterator;
189 MoveIterator mbegin(begin);
190 MoveIterator mend(end);
191 g_tasks_observed = 0;
192 tbb::parallel_do(mbegin, mend, body);
193 ASSERT (g_tasks_observed == g_tasks_expected, NULL);
194 }
195
196 template<class Body, class Iterator>
TestBody_MoveOnly(size_t depth)197 void TestBody_MoveOnly ( size_t depth ) {
198 typedef typename std::iterator_traits<Iterator>::value_type value_type;
199 value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
200 TestBody_MoveIter( Body(), Iterator(a_depths), Iterator(a_depths + depth));
201 }
202 #endif
203
204 template<class Body, class Iterator>
TestBody(size_t depth)205 void TestBody ( size_t depth ) {
206 typedef typename std::iterator_traits<Iterator>::value_type value_type;
207 value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
208 Body body;
209 Iterator begin(a_depths);
210 Iterator end(a_depths + depth);
211 g_tasks_observed = 0;
212 tbb::parallel_do(begin, end, body);
213 ASSERT (g_tasks_observed == g_tasks_expected, NULL);
214 #if __TBB_CPP11_RVALUE_REF_PRESENT
215 TestBody_MoveIter( body, Iterator(a_depths), Iterator(a_depths + depth) );
216 #endif
217 }
218
//! Bodies usable with any iterator category: items taken by value or const reference.
template<class Iterator>
void TestIterator_Common ( size_t depth ) {
    TestBody<FakeTaskGeneratorBody, Iterator> (depth);
    TestBody<FakeTaskGeneratorBody_ConstRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody, Iterator> (depth);
    TestBody<TaskGeneratorBody_ConstVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_ConstRefVersion, Iterator> (depth);
}
227
//! Bodies compatible with iterators yielding const items.
template<class Iterator>
void TestIterator_Const ( size_t depth ) {
    TestIterator_Common<Iterator>(depth);
    TestBody<TaskGeneratorBody_ConstVolatileRefVersion, Iterator> (depth);
}
233
//! Full matrix for iterators yielding modifiable items: adds non-const
//! reference bodies and, when available, rvalue-reference (move-only) bodies.
template<class Iterator>
void TestIterator_Modifiable ( size_t depth ) {
    TestIterator_Const<Iterator>(depth);
    TestBody<FakeTaskGeneratorBody_RefVersion, Iterator> (depth);
    TestBody<FakeTaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_RefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
#if __TBB_CPP11_RVALUE_REF_PRESENT
    TestBody_MoveOnly<FakeTaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
    TestBody_MoveOnly<TaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
#endif
}
246
//! Matrix for iterators whose items may be moved from (e.g. size_t* convertible
//! to value_t): common bodies plus the rvalue-reference variants.
template<class Iterator>
void TestIterator_Movable ( size_t depth ) {
    TestIterator_Common<Iterator>(depth);
#if __TBB_CPP11_RVALUE_REF_PRESENT
    TestBody<FakeTaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
#endif
}
255
//! Runs the whole body/iterator test matrix for every initial sequence length.
void Run( int /*nthread*/ ) {
    for( size_t depth = 0; depth <= N_DEPTHS; ++depth ) {
        // Serially precompute the number of tasks the parallel runs must observe.
        g_tasks_expected = 0;
        for ( size_t i=0; i < depth; ++i )
            g_tasks_expected += FindNumOfTasks( g_depths[i].value() );
        // Test for iterators over values convertible to work item type
        TestIterator_Movable<size_t*>(depth);
        // Test for random access iterators
        TestIterator_Modifiable<value_t*>(depth);
        // Test for input iterators
        TestIterator_Modifiable<Harness::InputIterator<value_t> >(depth);
        // Test for forward iterators
        TestIterator_Modifiable<Harness::ForwardIterator<value_t> >(depth);
        // Test for const random access iterators
        TestIterator_Const<Harness::ConstRandomIterator<value_t> >(depth);
    }
}
273
// Number of elements processed in each range_do_test pass.
const size_t elements = 10000;
// Initial value for the range_based_for_accumulate sums.
const size_t init_sum = 0;
// Counts how many elements the set_to body has visited.
tbb::atomic<size_t> element_counter;
277
//! Functor assigning the compile-time constant K to each element it visits.
template<size_t K>
struct set_to {
    void operator()(size_t& x) const {
        x = K;
        // Record the visit so callers can verify every element was processed once.
        ++element_counter;
    }
};
285
286 #include "test_range_based_for.h"
287 #include <functional>
288 #include <deque>
289
range_do_test()290 void range_do_test() {
291 using namespace range_based_for_support_tests;
292 std::deque<size_t> v(elements, 0);
293
294 // iterator, const and non-const range check
295 element_counter = 0;
296 tbb::parallel_do(v.begin(), v.end(), set_to<1>());
297 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
298 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
299
300 element_counter = 0;
301 tbb::parallel_do(v, set_to<0>());
302 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
303 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros");
304
305 element_counter = 0;
306 tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<1>());
307 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
308 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
309
310 // same as above with context group
311 element_counter = 0;
312 tbb::task_group_context context;
313 tbb::parallel_do(v.begin(), v.end(), set_to<0>(), context);
314 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
315 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all ones");
316
317 element_counter = 0;
318 tbb::parallel_do(v, set_to<1>(), context);
319 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
320 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
321
322 element_counter = 0;
323 tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<0>(), context);
324 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
325 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros");
326 }
327
328 #if __TBB_CPP11_RVALUE_REF_PRESENT
namespace TestMoveSem {
    //! Work item that should be moved, never copied, by parallel_do.
    /** Copy operations report an error (rather than assert) so each misuse
        shows up in the test log. 'addtofeed' tells the body whether to feed
        one child item; children are created with addtofeed == false. **/
    struct MovePreferable : Movable {
        MovePreferable() : Movable(), addtofeed(true) {}
        MovePreferable(bool addtofeed_) : Movable(), addtofeed(addtofeed_) {}
        MovePreferable(MovePreferable&& other) : Movable(std::move(other)), addtofeed(other.addtofeed) {};
        // base class is explicitly initialized in the copy ctor to avoid -Wextra warnings
        MovePreferable(const MovePreferable& other) : Movable(other) { REPORT("Error: copy ctor preferred.\n"); };
        MovePreferable& operator=(const MovePreferable&) { REPORT("Error: copy assign operator preferred.\n"); return *this; }
        bool addtofeed;
    };
    //! Strictly move-only work item; copying is disabled via the NoCopy base.
    struct MoveOnly : MovePreferable, NoCopy {
        MoveOnly() : MovePreferable() {}
        MoveOnly(bool addtofeed_) : MovePreferable(addtofeed_) {}
        MoveOnly(MoveOnly&& other) : MovePreferable(std::move(other)) {};
    };
}
345
//! Records one executed task and, for feeding items, adds exactly one child.
/** Children are constructed with addtofeed == false, so growth stops after one
    generation: N initial items produce 2*N total tasks. Alternates between
    feeding an rvalue temporary and an explicitly moved lvalue to exercise
    both feeder.add paths. **/
template<typename T>
void RecordAndAdd(const T& in, tbb::parallel_do_feeder<T>& feeder) {
    ASSERT(in.alive, "Got dead object in body");
    size_t i = ++g_tasks_observed;
    if (in.addtofeed) {
        if (i%2) feeder.add(T(false));
        else {
            T a(false);
            feeder.add(std::move(a));
        }
    }
}
// Body that takes an item by rvalue reference.
template<typename T>
struct TestMoveIteratorBody {
    void operator() (T&& in, tbb::parallel_do_feeder<T>& feeder) const { RecordAndAdd(in, feeder); }
};
// Body that takes an item by value (forces a move from the move iterator).
template<typename T>
struct TestMoveIteratorBodyByValue {
    void operator() (T in, tbb::parallel_do_feeder<T>& feeder) const { RecordAndAdd(in, feeder); }
};
368
369 template<typename Iterator, typename Body>
TestMoveIterator()370 void TestMoveIterator() {
371 typedef typename std::iterator_traits<Iterator>::value_type value_type;
372
373 Body body;
374 const size_t size = 65;
375 g_tasks_observed = 0;
376 value_type a[size];
377 tbb::parallel_do( std::make_move_iterator(Iterator(a)), std::make_move_iterator(Iterator(a+size)), body );
378 ASSERT(size * 2 == g_tasks_observed, NULL);
379 }
380
//! Runs the move-iterator test for every iterator category and both body forms.
template<typename T>
void DoTestMoveSemantics() {
    TestMoveIterator< Harness::InputIterator<T>, TestMoveIteratorBody<T> >();
    TestMoveIterator< Harness::ForwardIterator<T>, TestMoveIteratorBody<T> >();
    TestMoveIterator< Harness::RandomIterator<T>, TestMoveIteratorBody<T> >();

    TestMoveIterator< Harness::InputIterator<T>, TestMoveIteratorBodyByValue<T> >();
    TestMoveIterator< Harness::ForwardIterator<T>, TestMoveIteratorBodyByValue<T> >();
    TestMoveIterator< Harness::RandomIterator<T>, TestMoveIteratorBodyByValue<T> >();
}
391
//! Entry point for the move-semantics tests (or a skip notice without C++11).
void TestMoveSemantics() {
    DoTestMoveSemantics<TestMoveSem::MovePreferable>();
#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT && !__TBB_IS_COPY_CONSTRUCTIBLE_BROKEN
    // parallel_do uses is_copy_constructible to support non-copyable types
    DoTestMoveSemantics<TestMoveSem::MoveOnly>();
#endif
}
#else /* __TBB_CPP11_RVALUE_REF_PRESENT */
void TestMoveSemantics() {
    REPORT("Known issue: move support tests are skipped.\n");
}
#endif
404
//! Harness entry point: runs the suite over the [MinThread, MaxThread] range.
int TestMain () {
    if( MinThread<1 ) {
        REPORT("number of threads must be positive\n");
        exit(1);
    }
    g_values_counter = 0;
    for( int p=MinThread; p<=MaxThread; ++p ) {
        // Scheduler lives only for this loop iteration's concurrency level.
        tbb::task_scheduler_init init( p );
        Run(p);
        range_do_test();
        // Test that all workers sleep when no work
        TestCPUUserTime(p);
    }
    // This check must be performed after the scheduler terminated because only in this
    // case there is a guarantee that the workers already destroyed their last tasks.
    ASSERT( g_values_counter == 0, "Value objects were leaked" );

    TestMoveSemantics();
    return Harness::Done;
}
425