1 /*
2     Copyright (c) 2005-2020 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #if _MSC_VER && !defined(__INTEL_COMPILER)
18 #pragma warning(disable: 4180) // "qualifier applied to function type has no meaning; ignored"
19 #endif
20 
21 #include "tbb/parallel_for_each.h"
22 #include "tbb/task_scheduler_init.h"
23 #include "tbb/atomic.h"
24 #include "harness.h"
25 #include "harness_iterator.h"
26 #include <list>
27 
// Some old compilers can't deduce template parameter type for parallel_for_each
// if the function name is passed without explicit cast to function pointer.
typedef void (*TestFunctionType)(size_t);

// Accumulator shared by the test bodies below; updated concurrently, hence atomic.
tbb::atomic<size_t> sum;
33 
34 // This function is called via parallel_for_each
TestFunction(size_t value)35 void TestFunction (size_t value) {
36     sum += (unsigned int)value;
37 }
38 
39 const size_t NUMBER_OF_ELEMENTS = 1000;
40 
41 // Tests tbb::parallel_for_each functionality
42 template <typename Iterator>
RunPForEachTests()43 void RunPForEachTests()
44 {
45     size_t test_vector[NUMBER_OF_ELEMENTS + 1];
46 
47     sum = 0;
48     size_t test_sum = 0;
49 
50     for (size_t i =0; i < NUMBER_OF_ELEMENTS; i++) {
51         test_vector[i] = i;
52         test_sum += i;
53     }
54     test_vector[NUMBER_OF_ELEMENTS] = 1000000; // parallel_for_each shouldn't touch this element
55 
56     Iterator begin(&test_vector[0]);
57     Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
58 
59     tbb::parallel_for_each(begin, end, (TestFunctionType)TestFunction);
60     ASSERT(sum == test_sum, "Not all items of test vector were processed by parallel_for_each");
61     ASSERT(test_vector[NUMBER_OF_ELEMENTS] == 1000000, "parallel_for_each processed an extra element");
62 }
63 
64 typedef void (*TestMutatorType)(size_t&);
65 
TestMutator(size_t & value)66 void TestMutator(size_t& value) {
67     ASSERT(value==0,NULL);
68     ++sum;
69     ++value;
70 }
71 
72 //! Test that tbb::parallel_for_each works for mutable iterators.
73 template <typename Iterator>
RunMutablePForEachTests()74 void RunMutablePForEachTests() {
75     size_t test_vector[NUMBER_OF_ELEMENTS];
76     for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i )
77         test_vector[i] = 0;
78     sum = 0;
79     tbb::parallel_for_each( Iterator(test_vector), Iterator(test_vector+NUMBER_OF_ELEMENTS), (TestMutatorType)TestMutator );
80     ASSERT( sum==NUMBER_OF_ELEMENTS, "parallel_for_each called function wrong number of times" );
81     for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i )
82         ASSERT( test_vector[i]==1, "parallel_for_each did not process each element exactly once" );
83 }
84 
85 #if __TBB_TASK_GROUP_CONTEXT
86 #define HARNESS_EH_SIMPLE_MODE 1
87 #include "tbb/tbb_exception.h"
88 #include "harness_eh.h"
89 
90 #if TBB_USE_EXCEPTIONS
// Body passed to parallel_for_each in TestExceptionsSupport: every invocation
// throws via the harness, so an exception must propagate out of the algorithm.
void test_function_with_exception(size_t) {
    ThrowTestException();
}
94 
95 template <typename Iterator>
TestExceptionsSupport()96 void TestExceptionsSupport()
97 {
98     REMARK (__FUNCTION__);
99     size_t test_vector[NUMBER_OF_ELEMENTS + 1];
100 
101     for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) {
102         test_vector[i] = i;
103     }
104 
105     Iterator begin(&test_vector[0]);
106     Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
107 
108     TRY();
109        tbb::parallel_for_each(begin, end, (TestFunctionType)test_function_with_exception);
110     CATCH_AND_ASSERT();
111 }
112 #endif /* TBB_USE_EXCEPTIONS */
113 
// Cancellation support test
// Body passed to parallel_for_each while a CancellatorTask cancels the group:
// counts the iteration as executed, then blocks until the cancellator is
// ready, keeping iterations in flight while cancellation is requested.
void function_to_cancel(size_t ) {
    ++g_CurExecuted;
    CancellatorTask::WaitUntilReady();
}
119 
120 template <typename Iterator>
121 class my_worker_pforeach_task : public tbb::task
122 {
123     tbb::task_group_context &my_ctx;
124 
execute()125     tbb::task* execute () __TBB_override {
126         size_t test_vector[NUMBER_OF_ELEMENTS + 1];
127         for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) {
128             test_vector[i] = i;
129         }
130         Iterator begin(&test_vector[0]);
131         Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
132 
133         tbb::parallel_for_each(begin, end, (TestFunctionType)function_to_cancel);
134 
135         return NULL;
136     }
137 public:
my_worker_pforeach_task(tbb::task_group_context & ctx)138     my_worker_pforeach_task ( tbb::task_group_context &ctx) : my_ctx(ctx) { }
139 };
140 
// Runs the shared harness cancellation scenario: a CancellatorTask cancels the
// task group while my_worker_pforeach_task drives parallel_for_each over the
// given Iterator type.
template <typename Iterator>
void TestCancellation()
{
    REMARK (__FUNCTION__);
    ResetEhGlobals();
    RunCancellationTest<my_worker_pforeach_task<Iterator>, CancellatorTask>();
}
148 #endif /* __TBB_TASK_GROUP_CONTEXT */
149 
150 #include "harness_cpu.h"
151 
const size_t elements = 10000;  // container size used by range_for_each_test
const size_t init_sum = 0;      // seed value for range_based_for_accumulate
// Counts functor invocations so each overload can verify full coverage.
tbb::atomic<size_t> element_counter;
155 
156 template<size_t K>
157 struct set_to {
operator ()set_to158     void operator()(size_t& x) const {
159         x = K;
160         ++element_counter;
161     }
162 };
163 
164 #include "test_range_based_for.h"
165 #include <functional>
166 
range_for_each_test()167 void range_for_each_test() {
168     using namespace range_based_for_support_tests;
169     std::list<size_t> v(elements, 0);
170 
171     // iterator, const and non-const range check
172     element_counter = 0;
173     tbb::parallel_for_each(v.begin(), v.end(), set_to<1>());
174     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
175     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
176 
177     element_counter = 0;
178     tbb::parallel_for_each(v, set_to<0>());
179     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
180     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros");
181 
182     element_counter = 0;
183     tbb::parallel_for_each(tbb::blocked_range<std::list<size_t>::iterator>(v.begin(), v.end()), set_to<1>());
184     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
185     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
186 
187     // iterator, const and non-const range check with context
188     element_counter = 0;
189     tbb::task_group_context context;
190     tbb::parallel_for_each(v.begin(), v.end(), set_to<0>(), context);
191     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
192     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros");
193 
194     element_counter = 0;
195     tbb::parallel_for_each(v, set_to<1>(), context);
196     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
197     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
198 
199     element_counter = 0;
200     tbb::parallel_for_each(tbb::blocked_range<std::list<size_t>::iterator>(v.begin(), v.end()), set_to<0>(), context);
201     ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
202     ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros");
203 }
204 
TestMain()205 int TestMain () {
206     if( MinThread<1 ) {
207         REPORT("number of threads must be positive\n");
208         exit(1);
209     }
210     for( int p=MinThread; p<=MaxThread; ++p ) {
211         tbb::task_scheduler_init init( p );
212 
213         RunPForEachTests<Harness::RandomIterator<size_t> >();
214         RunPForEachTests<Harness::ConstRandomIterator<size_t> >();
215         RunPForEachTests<Harness::InputIterator<size_t> >();
216         RunPForEachTests<Harness::ForwardIterator<size_t> >();
217 
218         RunMutablePForEachTests<Harness::RandomIterator<size_t> >();
219         RunMutablePForEachTests<Harness::ForwardIterator<size_t> >();
220 
221 #if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
222         TestExceptionsSupport<Harness::RandomIterator<size_t> >();
223         TestExceptionsSupport<Harness::InputIterator<size_t> >();
224         TestExceptionsSupport<Harness::ForwardIterator<size_t> >();
225 #endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
226 
227 #if __TBB_TASK_GROUP_CONTEXT
228         if (p > 1) {
229             TestCancellation<Harness::RandomIterator<size_t> >();
230             TestCancellation<Harness::InputIterator<size_t> >();
231             TestCancellation<Harness::ForwardIterator<size_t> >();
232         }
233 #endif /* __TBB_TASK_GROUP_CONTEXT */
234 
235         range_for_each_test();
236 
237         // Test that all workers sleep when no work
238         TestCPUUserTime(p);
239     }
240 #if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
241     REPORT("Known issue: exception handling tests are skipped.\n");
242 #endif
243     return Harness::Done;
244 }
245