/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

// Before including pipeline.h, set up the variable that counts heap-allocated
// filter_node objects, and make it known to the header.
int filter_node_count = 0;
#define __TBB_TEST_FILTER_NODE_COUNT filter_node_count
#include "tbb/pipeline.h"

#include "tbb/atomic.h"
#include "harness.h"
#include <string.h>

#include "tbb/tbb_allocator.h"
#include "tbb/spin_mutex.h"

#if __TBB_CPP11_RVALUE_REF_PRESENT
#include <memory> // std::unique_ptr
#endif

const unsigned n_tokens = 8;
// We can conceivably have two buffers in use in the middle filter for every token in flight, so
// we must allocate two buffers for every token.  Unlikely, but possible.
const unsigned n_buffers = 2*n_tokens;
const int max_counter = 16;
static tbb::atomic<int> output_counter;
static tbb::atomic<int> input_counter;
static tbb::atomic<int> non_pointer_specialized_calls;
static tbb::atomic<int> pointer_specialized_calls;
static tbb::atomic<int> first_pointer_specialized_calls;
static tbb::atomic<int> second_pointer_specialized_calls;
static tbb::spin_mutex buffer_mutex;

static int intbuffer[max_counter];  // store results for <int,int> parallel pipeline test
static bool check_intbuffer;

static void* buffers[n_buffers];
static bool buf_available[n_buffers];

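// Fixed-size pool of raw buffers used by the pointer filter specializations:
// fetchNextBuffer() hands out a free slot, freeBuffer() returns it to the pool.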
void *fetchNextBuffer() {
    tbb::spin_mutex::scoped_lock sl1(buffer_mutex);
    for(size_t icnt = 0; icnt < n_buffers; ++icnt) {
        if(buf_available[icnt]) {
            buf_available[icnt] = false;
            return buffers[icnt];
        }
    }
    ASSERT(0, "Ran out of buffers");
    return 0;
}
void freeBuffer(void *buf) {
    for(size_t i=0; i < n_buffers;++i) {
        if(buffers[i] == buf) {
            buf_available[i] = true;
            return;
        }
    }
    ASSERT(0, "Tried to free a buffer not in our list");
}

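// RAII helper: on scope exit, run the destructor of the pointed-to object (if any)
// and return its buffer to the pool.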
template<typename T>
class free_on_scope_exit {
public:
    free_on_scope_exit(T *p) : my_p(p) {}
    ~free_on_scope_exit() { if(!my_p) return; my_p->~T(); freeBuffer(my_p); }
private:
    T *my_p;
};

#include "harness_checktype.h"

// Helpers for testing check_type<>; for other types they return innocuous values.
template<typename T>
bool middle_is_ready(T &/*p*/) { return false; }

template<typename U>
bool middle_is_ready(check_type<U> &p) { return p.is_ready(); }

template<typename T>
bool output_is_ready(T &/*p*/) { return true; }

template<typename U>
bool output_is_ready(check_type<U> &p) { return p.is_ready(); }

template<typename T>
int middle_my_id( T &/*p*/) { return 0; }

template<typename U>
int middle_my_id(check_type<U> &p) { return p.my_id(); }

template<typename T>
int output_my_id( T &/*p*/) { return 1; }

template<typename U>
int output_my_id(check_type<U> &p) { return p.my_id(); }

template<typename T>
void my_function(T &p) { p = 0; }

template<typename U>
void my_function(check_type<U> &p) { p.function(); }

// Filters must be copy-constructible and const-qualifiable.
template<typename U>
class input_filter : Harness::NoAfterlife {
public:
    U operator()( tbb::flow_control& control ) const {
        AssertLive();
        if( --input_counter < 0 ) {
            control.stop();
        }
        else  // only count successful reads
            ++non_pointer_specialized_calls;
        return U();  // default constructed
    }

};

// specialization for pointer
template<typename U>
class input_filter<U*> : Harness::NoAfterlife {
public:
    U* operator()(tbb::flow_control& control) const {
        AssertLive();
        int ival = --input_counter;
        if(ival < 0) {
            control.stop();
            return NULL;
        }
        ++pointer_specialized_calls;
        if(ival == max_counter / 2) {
            return NULL;  // pass a NULL token through without stopping the pipeline
        }
        U* myReturn = new(fetchNextBuffer()) U();
        return myReturn;
    }
};

template<>
class input_filter<void> : Harness::NoAfterlife {
public:
    void operator()( tbb::flow_control& control ) const {
        AssertLive();
        if( --input_counter < 0 ) {
            control.stop();
        }
        else
            ++non_pointer_specialized_calls;
    }

};

// specialization for int that passes back a sequence of integers
template<>
class input_filter<int> : Harness::NoAfterlife {
public:
    int
    operator()(tbb::flow_control& control ) const {
        AssertLive();
        int oldval = --input_counter;
        if( oldval < 0 ) {
            control.stop();
        }
        else
            ++non_pointer_specialized_calls;
        return oldval+1;
    }
};

template<typename T, typename U>
class middle_filter : Harness::NoAfterlife {
public:
    U operator()(T t) const {
        AssertLive();
        ASSERT(!middle_my_id(t), "bad id value");
        ASSERT(!middle_is_ready(t), "Already ready" );
        U out;
        my_function(out);
        ++non_pointer_specialized_calls;
        return out;
    }
};

template<typename T, typename U>
class middle_filter<T*,U> : Harness::NoAfterlife {
public:
    U operator()(T* my_storage) const {
        free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
        AssertLive();
        if(my_storage) {  // may have been passed in a NULL
            ASSERT(!middle_my_id(*my_storage), "bad id value");
            ASSERT(!middle_is_ready(*my_storage), "Already ready" );
        }
        ++first_pointer_specialized_calls;
        U out;
        my_function(out);
        return out;
    }
};

template<typename T, typename U>
class middle_filter<T,U*> : Harness::NoAfterlife {
public:
    U* operator()(T my_storage) const {
        AssertLive();
        ASSERT(!middle_my_id(my_storage), "bad id value");
        ASSERT(!middle_is_ready(my_storage), "Already ready" );
        // allocate new space from buffers
        U* my_return = new(fetchNextBuffer()) U();
        my_function(*my_return);
        ++second_pointer_specialized_calls;
        return my_return;
    }
};

template<typename T, typename U>
class middle_filter<T*,U*> : Harness::NoAfterlife {
public:
    U* operator()(T* my_storage) const {
        free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
        AssertLive();
        if(my_storage) {
            ASSERT(!middle_my_id(*my_storage), "bad id value");
            ASSERT(!middle_is_ready(*my_storage), "Already ready" );
        }
        // may have been passed a NULL
        ++pointer_specialized_calls;
        if(!my_storage) return NULL;
        ASSERT(!middle_my_id(*my_storage), "bad id value");
        ASSERT(!middle_is_ready(*my_storage), "Already ready" );
        U* my_return = new(fetchNextBuffer()) U();
        my_function(*my_return);
        return my_return;
    }
};

// specialization for int that squares the input and returns that.
template<>
class middle_filter<int,int> : Harness::NoAfterlife {
public:
    int operator()(int my_input) const {
        AssertLive();
        ++non_pointer_specialized_calls;
        return my_input*my_input;
    }
};

// ---------------------------------
template<typename T>
class output_filter : Harness::NoAfterlife {
public:
    void operator()(T c) const {
        AssertLive();
        ASSERT(output_my_id(c), "unset id value");
        ASSERT(output_is_ready(c), "not yet ready");
        ++non_pointer_specialized_calls;
        output_counter++;
    }
};

// specialization for int that puts the received value in an array
template<>
class output_filter<int> : Harness::NoAfterlife {
public:
    void operator()(int my_input) const {
        AssertLive();
        ++non_pointer_specialized_calls;
        int myindx = output_counter++;
        intbuffer[myindx] = my_input;
    }
};


template<typename T>
class output_filter<T*> : Harness::NoAfterlife {
public:
    void operator()(T* c) const {
        free_on_scope_exit<T> my_ptr(c);
        AssertLive();
        if(c) {
            ASSERT(output_my_id(*c), "unset id value");
            ASSERT(output_is_ready(*c), "not yet ready");
        }
        output_counter++;
        ++pointer_specialized_calls;
    }
};

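// Selects which combination of specialization counters checkCounters() validates
// after a pipeline run.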
typedef enum {
    no_pointer_counts,
    assert_nonpointer,
    assert_firstpointer,
    assert_secondpointer,
    assert_allpointer
} final_assert_type;

void resetCounters() {
    output_counter = 0;
    input_counter = max_counter;
    non_pointer_specialized_calls = 0;
    pointer_specialized_calls = 0;
    first_pointer_specialized_calls = 0;
    second_pointer_specialized_calls = 0;
    // We have to reset the buffer flags because our input filters return allocated space on end-of-input
    // (on eof a default-constructed object is returned), and it does not pass through the filters further.
    for(size_t i = 0; i < n_buffers; ++i)
        buf_available[i] = true;
}

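// Verify that all tokens reached the output filter and that the expected filter
// specializations (pointer vs. non-pointer) were the ones actually invoked.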
void checkCounters(final_assert_type my_t) {
    ASSERT(output_counter == max_counter, "not all tokens were passed through pipeline");
    switch(my_t) {
        case assert_nonpointer:
            ASSERT(pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "non-pointer filters specialized to pointer");
            ASSERT(non_pointer_specialized_calls == 3*max_counter, "bad count for non-pointer filters");
            if(check_intbuffer) {
                for(int i = 1; i <= max_counter; ++i) {
                    int j = i*i;
                    bool found_val = false;
                    for(int k = 0; k < max_counter; ++k) {
                        if(intbuffer[k] == j) {
                            found_val = true;
                            break;
                        }
                    }
                    ASSERT(found_val, "Missing value in output array" );
                }
            }
            break;
        case assert_firstpointer:
            ASSERT(pointer_specialized_calls == max_counter &&  // input filter extra invocation
                    first_pointer_specialized_calls == max_counter &&
                    non_pointer_specialized_calls == max_counter &&
                    second_pointer_specialized_calls == 0, "incorrect specialization for firstpointer");
            break;
        case assert_secondpointer:
            ASSERT(pointer_specialized_calls == max_counter &&
                    first_pointer_specialized_calls == 0 &&
                    non_pointer_specialized_calls == max_counter &&  // input filter
                    second_pointer_specialized_calls == max_counter, "incorrect specialization for secondpointer");
            break;
        case assert_allpointer:
            ASSERT(non_pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "pointer filters specialized to non-pointer");
            ASSERT(pointer_specialized_calls == 3*max_counter, "bad count for pointer filters");
            break;
        case no_pointer_counts:
            break;
    }
}

static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order};
const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]);

typedef tbb::filter_t<void, void> filter_chain;
typedef tbb::filter::mode mode_array;

// The filters are passed by value, which forces a temporary copy to be created.  This is
// to reproduce the bug where a filter_chain kept references to the filters, which after the
// call returned were references to destroyed temporaries.
template<typename type1, typename type2>
void fill_chain( filter_chain &my_chain, mode_array *filter_type, input_filter<type1> i_filter,
         middle_filter<type1, type2> m_filter, output_filter<type2> o_filter ) {
    my_chain = tbb::make_filter<void, type1>(filter_type[0], i_filter) &
        tbb::make_filter<type1, type2>(filter_type[1], m_filter) &
        tbb::make_filter<type2, void>(filter_type[2], o_filter);
}

void run_function_spec() {
    ASSERT(!filter_node_count, NULL);
    REMARK("Testing < void, void > (single filter in pipeline)");
#if __TBB_CPP11_LAMBDAS_PRESENT
    REMARK( " ( + lambdas)");
#endif
    REMARK("\n");
    input_filter<void> i_filter;
    // Test pipeline that contains only one filter
    for( unsigned i = 0; i<number_of_filter_types; i++) {
        tbb::filter_t<void, void> one_filter( filter_table[i], i_filter );
        ASSERT(filter_node_count==1, "some filter nodes left after previous iteration?");
        resetCounters();
        tbb::parallel_pipeline( n_tokens, one_filter );
        // no need to check counters
#if __TBB_CPP11_LAMBDAS_PRESENT
        tbb::atomic<int> counter;
        counter = max_counter;
        // Construct filter using lambda-syntax when parallel_pipeline() is being run;
        tbb::parallel_pipeline( n_tokens,
            tbb::make_filter<void, void>(filter_table[i], [&counter]( tbb::flow_control& control ) {
                    if( counter-- == 0 )
                        control.stop();
                    }
            )
        );
#endif
    }
    ASSERT(!filter_node_count, "filter_node objects leaked");
}

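// Run one <t1,t2> filter combination several ways: chains built inline, chains built
// incrementally, reused and copied filter_t objects, and chains built from temporaries.
// After each run the specialization counters are checked against my_t.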
template<typename t1, typename t2>
void run_filter_set(
        input_filter<t1>& i_filter,
        middle_filter<t1,t2>& m_filter,
        output_filter<t2>& o_filter,
        mode_array *filter_type,
        final_assert_type my_t) {
    tbb::filter_t<void, t1> filter1( filter_type[0], i_filter );
    tbb::filter_t<t1, t2> filter2( filter_type[1], m_filter );
    tbb::filter_t<t2, void> filter3( filter_type[2], o_filter );
    ASSERT(filter_node_count==3, "some filter nodes left after previous iteration?");
    resetCounters();
    // Create filters sequence when parallel_pipeline() is being run
    tbb::parallel_pipeline( n_tokens, filter1 & filter2 & filter3 );
    checkCounters(my_t);

    // Create filters sequence partially outside parallel_pipeline() and also when parallel_pipeline() is being run
    tbb::filter_t<void, t2> filter12;
    filter12 = filter1 & filter2;
    resetCounters();
    tbb::parallel_pipeline( n_tokens, filter12 & filter3 );
    checkCounters(my_t);

    tbb::filter_t<void, void> filter123 = filter12 & filter3;
    // Run pipeline twice with the same filter sequence
    for( unsigned i = 0; i<2; i++ ) {
        resetCounters();
        tbb::parallel_pipeline( n_tokens, filter123 );
        checkCounters(my_t);
    }

    // Now copy-construct another filter_t instance, and use it to run pipeline
    {
        tbb::filter_t<void, void> copy123( filter123 );
        resetCounters();
        tbb::parallel_pipeline( n_tokens, copy123 );
        checkCounters(my_t);
    }

    // Construct filters and create the sequence when parallel_pipeline() is being run
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
               tbb::make_filter<void, t1>(filter_type[0], i_filter) &
               tbb::make_filter<t1, t2>(filter_type[1], m_filter) &
               tbb::make_filter<t2, void>(filter_type[2], o_filter) );
    checkCounters(my_t);

    // Construct filters, make a copy, destroy the original filters, and run with the copy
    int cnt = filter_node_count;
    {
        tbb::filter_t<void, void>* p123 = new tbb::filter_t<void,void> (
               tbb::make_filter<void, t1>(filter_type[0], i_filter) &
               tbb::make_filter<t1, t2>(filter_type[1], m_filter) &
               tbb::make_filter<t2, void>(filter_type[2], o_filter) );
        ASSERT(filter_node_count==cnt+5, "filter node accounting error?");
        tbb::filter_t<void, void> copy123( *p123 );
        delete p123;
        ASSERT(filter_node_count==cnt+5, "filter nodes deleted prematurely?");
        resetCounters();
        tbb::parallel_pipeline( n_tokens, copy123 );
        checkCounters(my_t);
    }

    // construct a filter with temporaries
    {
        tbb::filter_t<void, void> my_filter;
        fill_chain<t1,t2>( my_filter, filter_type, i_filter, m_filter, o_filter );
        resetCounters();
        tbb::parallel_pipeline( n_tokens, my_filter );
        checkCounters(my_t);
    }
    ASSERT(filter_node_count==cnt, "scope ended but filter nodes not deleted?");
}

#if __TBB_CPP11_LAMBDAS_PRESENT
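// Repeat the pipeline tests with filters written as lambdas, covering all four
// combinations of value and pointer token types between the stages.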
template <typename t1, typename t2>
void run_lambdas_test( mode_array *filter_type ) {
    tbb::atomic<int> counter;
    counter = max_counter;
    // Construct filters using lambda-syntax and create the sequence when parallel_pipeline() is being run;
    resetCounters();  // only need the output_counter reset.
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 {
                if( --counter < 0 )
                    control.stop();
                return t1(); }
        ) &
        tbb::make_filter<t1, t2>(filter_type[1], []( t1 /*my_storage*/ ) -> t2 {
                return t2(); }
        ) &
        tbb::make_filter<t2, void>(filter_type[2], [] ( t2 ) -> void {
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);  // don't have to worry about specializations
    counter = max_counter;
    // pointer filters
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* {
                if( --counter < 0 ) {
                    control.stop();
                    return NULL;
                }
                return new(fetchNextBuffer()) t1(); }
        ) &
        tbb::make_filter<t1*, t2*>(filter_type[1], []( t1* my_storage ) -> t2* {
                tbb::tbb_allocator<t1>().destroy(my_storage); // my_storage->~t1();
                return new(my_storage) t2(); }
        ) &
        tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage ) -> void {
                tbb::tbb_allocator<t2>().destroy(my_storage);  // my_storage->~t2();
                freeBuffer(my_storage);
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
    // first filter outputs pointer
    counter = max_counter;
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* {
                if( --counter < 0 ) {
                    control.stop();
                    return NULL;
                }
                return new(fetchNextBuffer()) t1(); }
        ) &
        tbb::make_filter<t1*, t2>(filter_type[1], []( t1* my_storage ) -> t2 {
                tbb::tbb_allocator<t1>().destroy(my_storage);   // my_storage->~t1();
                freeBuffer(my_storage);
                return t2(); }
        ) &
        tbb::make_filter<t2, void>(filter_type[2], [] ( t2 /*my_storage*/) -> void {
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
    // second filter outputs pointer
    counter = max_counter;
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 {
                if( --counter < 0 ) {
                    control.stop();
                }
                return t1(); }
        ) &
        tbb::make_filter<t1, t2*>(filter_type[1], []( t1 /*my_storage*/ ) -> t2* {
                return new(fetchNextBuffer()) t2(); }
        ) &
        tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage) -> void {
                tbb::tbb_allocator<t2>().destroy(my_storage);  // my_storage->~t2();
                freeBuffer(my_storage);
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
}
#endif

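// Drive every combination of filter modes for the given <type1, type2> pair, exercising
// the value and pointer specializations (and, when available, the lambda variants).
// l1 and l2 are the type names, used only for reporting and for the <int,int> output check.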
template<typename type1, typename type2>
void run_function(const char *l1, const char *l2) {
    ASSERT(!filter_node_count, NULL);
    REMARK("Testing < %s, %s >", l1, l2 );
#if __TBB_CPP11_LAMBDAS_PRESENT
    REMARK( " ( + lambdas)");
#endif
    check_intbuffer = (!strcmp(l1,"int") && !strcmp(l2,"int"));
    if(check_intbuffer) REMARK(", check output of filters");
    REMARK("\n");

    Check<type1> check1;  // check constructions/destructions
    Check<type2> check2;  // for type1 or type2 === check_type<T>

    const size_t number_of_filters = 3;

    input_filter<type1> i_filter;
    input_filter<type1*> p_i_filter;

    middle_filter<type1, type2> m_filter;
    middle_filter<type1*, type2> pr_m_filter;
    middle_filter<type1, type2*> rp_m_filter;
    middle_filter<type1*, type2*> pp_m_filter;

    output_filter<type2> o_filter;
    output_filter<type2*> p_o_filter;

    // allocate the buffers for the filters
    unsigned max_size = (sizeof(type1) > sizeof(type2) ) ? sizeof(type1) : sizeof(type2);
    for(unsigned i = 0; i < (unsigned)n_buffers; ++i) {
        buffers[i] = malloc(max_size);
        buf_available[i] = true;
    }

    unsigned limit = 1;
    // Test pipeline that contains number_of_filters filters
    for( unsigned i=0; i<number_of_filters; ++i)
        limit *= number_of_filter_types;
    // Iterate over possible filter sequences
    for( unsigned numeral=0; numeral<limit; ++numeral ) {
        unsigned temp = numeral;
        tbb::filter::mode filter_type[number_of_filters];
        for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types )
            filter_type[i] = filter_table[temp%number_of_filter_types];

        run_filter_set<type1,type2>(i_filter, m_filter, o_filter, filter_type, assert_nonpointer );
        run_filter_set<type1*,type2>(p_i_filter, pr_m_filter, o_filter, filter_type, assert_firstpointer);
        run_filter_set<type1,type2*>(i_filter, rp_m_filter, p_o_filter, filter_type, assert_secondpointer);
        run_filter_set<type1*,type2*>(p_i_filter, pp_m_filter, p_o_filter, filter_type, assert_allpointer);

#if __TBB_CPP11_LAMBDAS_PRESENT
        run_lambdas_test<type1,type2>(filter_type);
#endif
    }
    ASSERT(!filter_node_count, "filter_node objects leaked");

    for(unsigned i = 0; i < (unsigned)n_buffers; ++i) {
        free(buffers[i]);
    }
}

#include "tbb/task_scheduler_init.h"

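// Harness entry point: run the pipeline tests for every supported type combination,
// repeating for each requested thread count.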
int TestMain() {
#if TBB_USE_DEBUG
    // Report use_allocator<T>::value for various types (determined by size and copyability).
    REMARK("use_allocator<int>::value=%d\n", tbb::interface6::internal::use_allocator<int>::value);
    REMARK("use_allocator<double>::value=%d\n", tbb::interface6::internal::use_allocator<double>::value);
    REMARK("use_allocator<int *>::value=%d\n", tbb::interface6::internal::use_allocator<int *>::value);
    REMARK("use_allocator<check_type<int> >::value=%d\n", tbb::interface6::internal::use_allocator<check_type<int> >::value);
    REMARK("use_allocator<check_type<int>* >::value=%d\n", tbb::interface6::internal::use_allocator<check_type<int>* >::value);
    REMARK("use_allocator<check_type<short> >::value=%d\n\n", tbb::interface6::internal::use_allocator<check_type<short> >::value);
#endif
    // Test with varying number of threads.
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        // Initialize TBB task scheduler
        REMARK("\nTesting with nthread=%d\n", nthread);
        tbb::task_scheduler_init init(nthread);

        // Run test several times with different types
        run_function_spec();
        #define RUN_FUNCTION(type1, type2) run_function<type1, type2>(#type1, #type2);
        RUN_FUNCTION(size_t, int)
        RUN_FUNCTION(int, double)
        RUN_FUNCTION(size_t, double)
        RUN_FUNCTION(size_t, bool)
        RUN_FUNCTION(int, int)
        RUN_FUNCTION(check_type<unsigned int>, size_t)
        RUN_FUNCTION(check_type<unsigned short>, size_t)
        RUN_FUNCTION(check_type<unsigned int>, check_type<unsigned int>)
        RUN_FUNCTION(check_type<unsigned int>, check_type<unsigned short>)
        RUN_FUNCTION(check_type<unsigned short>, check_type<unsigned short>)
        RUN_FUNCTION(double, check_type<unsigned short>)
#if __TBB_CPP11_RVALUE_REF_PRESENT
        RUN_FUNCTION(std::unique_ptr<int>, std::unique_ptr<int>) // move-only type
#endif
        #undef RUN_FUNCTION
    }
    return Harness::Done;
}