/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

//------------------------------------------------------------------------
// Test TBB mutexes when used with parallel_for.h
//
// Usage: test_Mutex.exe [-v] nthread
//
// The -v option causes timing information to be printed.
//
// Compile with _OPENMP and -openmp
//------------------------------------------------------------------------
#include "harness_defs.h"
#include "tbb/spin_mutex.h"
#include "tbb/critical_section.h"
#include "tbb/spin_rw_mutex.h"
#include "tbb/queuing_rw_mutex.h"
#include "tbb/queuing_mutex.h"
#include "tbb/mutex.h"
#include "tbb/recursive_mutex.h"
#include "tbb/null_mutex.h"
#include "tbb/null_rw_mutex.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include "tbb/tick_count.h"
#include "tbb/atomic.h"
#include "harness.h"
#include <cstdlib>
#include <cstdio>
#if _OPENMP
#include "test/OpenMP_Mutex.h"
#endif /* _OPENMP */
#include "tbb/tbb_profiling.h"

#ifndef TBB_TEST_LOW_WORKLOAD
    #define TBB_TEST_LOW_WORKLOAD TBB_USE_THREADING_TOOLS
#endif

// This test deliberately avoids a "using namespace tbb" directive,
// so that the error of putting types in the wrong namespace will be caught.

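//! A shared counter paired with the mutex that protects it.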
template<typename M>
struct Counter {
    typedef M mutex_type;
    M mutex;
    volatile long value;
};

//! Function object for use with parallel_for.h.
template<typename C>
struct AddOne: NoAssign {
    C& counter;
    /** Increments counter once for each iteration in the iteration space. */
    void operator()( tbb::blocked_range<size_t>& range ) const {
        for( size_t i=range.begin(); i!=range.end(); ++i ) {
            if( i&1 ) {
                // Try implicit acquire and explicit release
                typename C::mutex_type::scoped_lock lock(counter.mutex);
                counter.value = counter.value+1;
                lock.release();
            } else {
                // Try explicit acquire and implicit release
                typename C::mutex_type::scoped_lock lock;
                lock.acquire(counter.mutex);
                counter.value = counter.value+1;
            }
        }
    }
    AddOne( C& counter_ ) : counter(counter_) {}
};

//! Adaptor for using ISO C++0x style mutex as a TBB-style mutex.
template<typename M>
class TBB_MutexFromISO_Mutex {
    M my_iso_mutex;
public:
    typedef TBB_MutexFromISO_Mutex mutex_type;

    class scoped_lock;
    friend class scoped_lock;

    class scoped_lock {
        mutex_type* my_mutex;
    public:
        scoped_lock() : my_mutex(NULL) {}
        scoped_lock( mutex_type& m ) : my_mutex(NULL) {
            acquire(m);
        }
        scoped_lock( mutex_type& m, bool is_writer ) : my_mutex(NULL) {
            acquire(m,is_writer);
        }
        void acquire( mutex_type& m ) {
            m.my_iso_mutex.lock();
            my_mutex = &m;
        }
        bool try_acquire( mutex_type& m ) {
            if( m.my_iso_mutex.try_lock() ) {
                my_mutex = &m;
                return true;
            } else {
                return false;
            }
        }
        void release() {
            my_mutex->my_iso_mutex.unlock();
            my_mutex = NULL;
        }

        // Methods for reader-writer mutex
        // These methods can be instantiated only if M supports lock_read() and try_lock_read().

        void acquire( mutex_type& m, bool is_writer ) {
            if( is_writer ) m.my_iso_mutex.lock();
            else m.my_iso_mutex.lock_read();
            my_mutex = &m;
        }
        bool try_acquire( mutex_type& m, bool is_writer ) {
            if( is_writer ? m.my_iso_mutex.try_lock() : m.my_iso_mutex.try_lock_read() ) {
                my_mutex = &m;
                return true;
            } else {
                return false;
            }
        }
        bool upgrade_to_writer() {
            my_mutex->my_iso_mutex.unlock();
            my_mutex->my_iso_mutex.lock();
            return false;
        }
        bool downgrade_to_reader() {
            my_mutex->my_iso_mutex.unlock();
            my_mutex->my_iso_mutex.lock_read();
            return false;
        }
        ~scoped_lock() {
            if( my_mutex )
                release();
        }
    };

    static const bool is_recursive_mutex = M::is_recursive_mutex;
    static const bool is_rw_mutex = M::is_rw_mutex;
};
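// The adaptor is exercised by the TestISO* helpers below, which wrap a TBB mutex
// (TBB mutexes also expose the ISO lock()/unlock() interface) and then run the
// generic tests on the wrapper, e.g. TBB_MutexFromISO_Mutex<tbb::spin_mutex>.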

namespace tbb {
    namespace profiling {
        template<typename M>
        void set_name( const TBB_MutexFromISO_Mutex<M>&, const char* ) {}
    }
}

//! Generic test of a TBB mutex type M.
/** Does not test features specific to reader-writer locks. */
template<typename M>
void Test( const char * name ) {
    REMARK("%s size == %d, time = ", name, (int)sizeof(M));
    Counter<M> counter;
    counter.value = 0;
    tbb::profiling::set_name(counter.mutex, name);
#if TBB_TEST_LOW_WORKLOAD
    const int n = 10000;
#else
    const int n = 100000;
#endif /* TBB_TEST_LOW_WORKLOAD */
    tbb::tick_count t0 = tbb::tick_count::now();
    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/10),AddOne<Counter<M> >(counter));
    tbb::tick_count t1 = tbb::tick_count::now();
    REMARK("%g sec\n",(t1-t0).seconds());
    if( counter.value!=n )
        REPORT("ERROR for %s: counter.value=%ld\n",name,counter.value);
}

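//! Shared state whose invariant is that all N elements of value[] are equal.
/** A writer increments every element while holding the lock; readers check that
    the elements are still equal, so a torn update indicates a locking failure. */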
template<typename M, size_t N>
struct Invariant {
    typedef M mutex_type;
    M mutex;
    const char* mutex_name;
    volatile long value[N];
    Invariant( const char* mutex_name_ ) :
        mutex_name(mutex_name_)
    {
        for( size_t k=0; k<N; ++k )
            value[k] = 0;
        tbb::profiling::set_name(mutex, mutex_name_);
    }
    ~Invariant() {
    }
    void update() {
        for( size_t k=0; k<N; ++k )
            ++value[k];
    }
    bool value_is( long expected_value ) const {
        long tmp;
        for( size_t k=0; k<N; ++k )
            if( (tmp=value[k])!=expected_value ) {
                REPORT("ERROR: %ld!=%ld\n", tmp, expected_value);
                return false;
            }
        return true;
    }
    bool is_okay() {
        return value_is( value[0] );
    }
};

//! Function object for use with parallel_for.h.
template<typename I>
struct TwiddleInvariant: NoAssign {
    I& invariant;
    TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {}

    /** Exercises the invariant with a mix of read, write, upgrade, and downgrade accesses. */
    void operator()( tbb::blocked_range<size_t>& range ) const {
        for( size_t i=range.begin(); i!=range.end(); ++i ) {
            //! Every 8th access is a write access
            const bool write = (i%8)==7;
            bool okay = true;
            bool lock_kept = true;
            if( (i/8)&1 ) {
                // Try implicit acquire and explicit release
                typename I::mutex_type::scoped_lock lock(invariant.mutex,write);
                execute_aux(lock, i, write, /*ref*/okay, /*ref*/lock_kept);
                lock.release();
            } else {
                // Try explicit acquire and implicit release
                typename I::mutex_type::scoped_lock lock;
                lock.acquire(invariant.mutex,write);
                execute_aux(lock, i, write, /*ref*/okay, /*ref*/lock_kept);
            }
            if( !okay ) {
                REPORT( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i),
                        write     ? "write,"                  : "read,",
                        write     ? (i%16==7?"downgrade,":"") : (i%8==3?"upgrade,":""),
                        lock_kept ? "lock kept,"              : "lock not kept,", // TODO: only if downgrade/upgrade
                        (i/8)&1   ? "impl/expl"               : "expl/impl" );
            }
        }
    }
private:
    void execute_aux(typename I::mutex_type::scoped_lock & lock, const size_t i, const bool write, bool & okay, bool & lock_kept) const {
        if( write ) {
            long my_value = invariant.value[0];
            invariant.update();
            if( i%16==7 ) {
                lock_kept = lock.downgrade_to_reader();
                if( !lock_kept )
                    my_value = invariant.value[0] - 1;
                okay = invariant.value_is(my_value+1);
            }
        } else {
            okay = invariant.is_okay();
            if( i%8==3 ) {
                long my_value = invariant.value[0];
                lock_kept = lock.upgrade_to_writer();
                if( !lock_kept )
                    my_value = invariant.value[0];
                invariant.update();
                okay = invariant.value_is(my_value+1);
            }
        }
    }
};

/** This test is generic so that we can test any other kinds of reader-writer locks we write later. */
template<typename M>
void TestReaderWriterLock( const char * mutex_name ) {
    REMARK( "%s readers & writers time = ", mutex_name );
    Invariant<M,8> invariant(mutex_name);
#if TBB_TEST_LOW_WORKLOAD
    const size_t n = 10000;
#else
    const size_t n = 500000;
#endif /* TBB_TEST_LOW_WORKLOAD */
    tbb::tick_count t0 = tbb::tick_count::now();
    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/100),TwiddleInvariant<Invariant<M,8> >(invariant));
    tbb::tick_count t1 = tbb::tick_count::now();
    // Every 8th iteration is a plain write and every 8th is a reader upgraded to a writer,
    // so the invariant is incremented once per 4 iterations.
    long expected_value = n/4;
    if( !invariant.value_is(expected_value) )
        REPORT("ERROR for %s: final invariant value is wrong\n",mutex_name);
    REMARK( "%g sec\n", (t1-t0).seconds() );
}

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

/** Test try_acquire_reader functionality of a non-reentrant reader-writer mutex */
template<typename M>
void TestTryAcquireReader_OneThread( const char * mutex_name ) {
    M tested_mutex;
    typename M::scoped_lock lock1;
    if( M::is_rw_mutex ) {
        if( lock1.try_acquire(tested_mutex, false) )
            lock1.release();
        else
            REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
        {
            typename M::scoped_lock lock2(tested_mutex, false);   // read lock
            if( lock1.try_acquire(tested_mutex) )                 // attempt to acquire write
                REPORT("ERROR for %s: try_acquire succeeded though it should not (1)\n", mutex_name);
            lock2.release();                                      // unlock
            lock2.acquire(tested_mutex, true);                    // write lock
            if( lock1.try_acquire(tested_mutex, false) )          // attempt to acquire read
                REPORT("ERROR for %s: try_acquire succeeded though it should not (2)\n", mutex_name);
        }
        if( lock1.try_acquire(tested_mutex, false) )
            lock1.release();
        else
            REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
    }
}

/** Test try_acquire functionality of a non-reentrant mutex */
template<typename M>
void TestTryAcquire_OneThread( const char * mutex_name ) {
    M tested_mutex;
    typename M::scoped_lock lock1;
    if( lock1.try_acquire(tested_mutex) )
        lock1.release();
    else
        REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
    {
        if( M::is_recursive_mutex ) {
            typename M::scoped_lock lock2(tested_mutex);
            if( lock1.try_acquire(tested_mutex) )
                lock1.release();
            else
                REPORT("ERROR for %s: try_acquire on recursive lock failed though it should not\n", mutex_name);
            // on Windows, both are recursive
        } else {
            typename M::scoped_lock lock2(tested_mutex);
            if( lock1.try_acquire(tested_mutex) )
                REPORT("ERROR for %s: try_acquire succeeded though it should not (3)\n", mutex_name);
        }
    }
    if( lock1.try_acquire(tested_mutex) )
        lock1.release();
    else
        REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
}

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif

const int RecurN = 4;
int RecurArray[ RecurN ];
tbb::recursive_mutex RecurMutex[ RecurN ];
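// RecurArray[i] tracks the recursion depth currently holding RecurMutex[i].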

struct RecursiveAcquisition {
    /** x = number being decoded in base N
        max_lock = index of highest lock acquired so far
        mask = bit mask; ith bit set if lock i has been acquired. */
    void Body( size_t x, int max_lock=-1, unsigned int mask=0 ) const
    {
        int i = (int) (x % RecurN);
        bool first = (mask&1U<<i)==0;
        if( first ) {
            // first time to acquire lock
            if( i<max_lock )
                // out of order acquisition might lead to deadlock, so stop
                return;
            max_lock = i;
        }

        if( (i&1)!=0 ) {
            // acquire lock on location RecurArray[i] using explicit acquire
            tbb::recursive_mutex::scoped_lock r_lock;
            r_lock.acquire( RecurMutex[i] );
            int a = RecurArray[i];
            ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" );
            ++RecurArray[i];
            if( x )
                Body( x/RecurN, max_lock, mask|1U<<i );
            --RecurArray[i];
            ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" );

            // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one
            if( (i&2)!=0 ) r_lock.release();
        } else {
            // acquire lock on location RecurArray[i] using implicit acquire
            tbb::recursive_mutex::scoped_lock r_lock( RecurMutex[i] );
            int a = RecurArray[i];

            ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" );

            ++RecurArray[i];
            if( x )
                Body( x/RecurN, max_lock, mask|1U<<i );
            --RecurArray[i];

            ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" );

            // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one
            if( (i&2)!=0 ) r_lock.release();
        }
    }

    void operator()( const tbb::blocked_range<size_t> &r ) const
    {
        for( size_t x=r.begin(); x<r.end(); x++ ) {
            Body( x );
        }
    }
};

/** This test is generic so that we may test other kinds of recursive mutexes. */
template<typename M>
void TestRecursiveMutex( const char * mutex_name )
{
    for ( int i = 0; i < RecurN; ++i ) {
        tbb::profiling::set_name(RecurMutex[i], mutex_name);
    }
    tbb::tick_count t0 = tbb::tick_count::now();
    tbb::parallel_for(tbb::blocked_range<size_t>(0,10000,500), RecursiveAcquisition());
    tbb::tick_count t1 = tbb::tick_count::now();
    REMARK( "%s recursive mutex time = %g sec\n", mutex_name, (t1-t0).seconds() );
}

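//! Function object that recursively re-acquires the same mutex on each nesting level.
/** Meaningful only for a recursive mutex; see the ASSERT in the constructor. */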
template<typename C>
struct NullRecursive: NoAssign {
    void recurse_till( size_t i, size_t till ) const {
        if( i==till ) {
            counter.value = counter.value+1;
            return;
        }
        if( i&1 ) {
            typename C::mutex_type::scoped_lock lock2(counter.mutex);
            recurse_till( i+1, till );
            lock2.release();
        } else {
            typename C::mutex_type::scoped_lock lock2;
            lock2.acquire(counter.mutex);
            recurse_till( i+1, till );
        }
    }

    void operator()( tbb::blocked_range<size_t>& range ) const {
        typename C::mutex_type::scoped_lock lock(counter.mutex);
        recurse_till( range.begin(), range.end() );
    }
    NullRecursive( C& counter_ ) : counter(counter_) {
        ASSERT( C::mutex_type::is_recursive_mutex, "Null mutex should be a recursive mutex." );
    }
    C& counter;
};

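//! Function object that upgrades and downgrades a null RW mutex; both operations must report success.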
template<typename M>
struct NullUpgradeDowngrade: NoAssign {
    void operator()( tbb::blocked_range<size_t>& range ) const {
        typename M::scoped_lock lock2;
        for( size_t i=range.begin(); i!=range.end(); ++i ) {
            if( i&1 ) {
                typename M::scoped_lock lock1(my_mutex, true);
                if( lock1.downgrade_to_reader()==false )
                    REPORT("ERROR for %s: downgrade should always succeed\n", name);
            } else {
                lock2.acquire( my_mutex, false );
                if( lock2.upgrade_to_writer()==false )
                    REPORT("ERROR for %s: upgrade should always succeed\n", name);
                lock2.release();
            }
        }
    }

    NullUpgradeDowngrade( M& m_, const char* n_ ) : my_mutex(m_), name(n_) {}
    M& my_mutex;
    const char* name;
};

template<typename M>
void TestNullMutex( const char * name ) {
    Counter<M> counter;
    counter.value = 0;
    const int n = 100;
    REMARK("TestNullMutex<%s>",name);
    {
        tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),AddOne<Counter<M> >(counter));
    }
    counter.value = 0;
    {
        tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullRecursive<Counter<M> >(counter));
    }
    REMARK("\n");
}

template<typename M>
void TestNullRWMutex( const char * name ) {
    REMARK("TestNullRWMutex<%s>",name);
    const int n = 100;
    M m;
    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullUpgradeDowngrade<M>(m, name));
    REMARK("\n");
}

//! Test ISO C++0x compatibility portion of TBB mutex
template<typename M>
void TestISO( const char * name ) {
    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
    Test<tbb_from_iso>( name );
}

//! Test ISO C++0x try_lock functionality of a non-reentrant mutex
template<typename M>
void TestTryAcquire_OneThreadISO( const char * name ) {
    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
    TestTryAcquire_OneThread<tbb_from_iso>( name );
}

//! Test ISO-like C++0x compatibility portion of TBB reader-writer mutex
template<typename M>
void TestReaderWriterLockISO( const char * name ) {
    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
    TestReaderWriterLock<tbb_from_iso>( name );
    TestTryAcquireReader_OneThread<tbb_from_iso>( name );
}

//! Test ISO C++0x compatibility portion of TBB recursive mutex
template<typename M>
void TestRecursiveMutexISO( const char * name ) {
    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
    TestRecursiveMutex<tbb_from_iso>(name);
}

#include "harness_tsx.h"
#include "tbb/task_scheduler_init.h"

#if __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER

//! Function object for use with parallel_for.h to see if a transaction is actually attempted.
tbb::atomic<size_t> n_transactions_attempted;
template<typename C>
struct AddOne_CheckTransaction: NoAssign {
    C& counter;
    /** Increments counter once for each iteration in the iteration space. */
    void operator()( tbb::blocked_range<size_t>& range ) const {
        for( size_t i=range.begin(); i!=range.end(); ++i ) {
            bool transaction_attempted = false;
            {
                typename C::mutex_type::scoped_lock lock(counter.mutex);
                if( IsInsideTx() ) transaction_attempted = true;
                counter.value = counter.value+1;
            }
            if( transaction_attempted ) ++n_transactions_attempted;
            __TBB_Pause(i);
        }
    }
    AddOne_CheckTransaction( C& counter_ ) : counter(counter_) {}
};

/* TestTransaction() checks if a speculative mutex actually uses transactions. */
template<typename M>
void TestTransaction( const char * name )
{
    Counter<M> counter;
#if TBB_TEST_LOW_WORKLOAD
    const int n = 100;
#else
    const int n = 1000;
#endif
    REMARK("TestTransaction with %s: ",name);

    n_transactions_attempted = 0;
    tbb::tick_count start, stop;
    for( int i=0; i<5 && n_transactions_attempted==0; ++i ) {
        counter.value = 0;
        start = tbb::tick_count::now();
        tbb::parallel_for(tbb::blocked_range<size_t>(0,n,2),AddOne_CheckTransaction<Counter<M> >(counter));
        stop = tbb::tick_count::now();
        if( counter.value!=n ) {
            REPORT("ERROR for %s: counter.value=%ld\n",name,counter.value);
            break;
        }
    }

    if( n_transactions_attempted==0 )
        REPORT( "ERROR: transactions were never attempted\n" );
    else
        REMARK("%d successful transactions in %6.6f seconds\n", (int)n_transactions_attempted, (stop - start).seconds());
}
#endif  /* __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER */

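//! Body that repeatedly downgrades a lock already held as a reader and repeatedly
//! upgrades a lock already held as a writer; both operations must report success.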
template<typename M>
class RWStateMultipleChangeBody {
    M& my_mutex;
public:
    RWStateMultipleChangeBody(M& m) : my_mutex(m) {}

    void operator()(const tbb::blocked_range<size_t>& r) const {
        typename M::scoped_lock l(my_mutex, /*write=*/false);
        for(size_t i = r.begin(); i != r.end(); ++i) {
            ASSERT(l.downgrade_to_reader(), "Downgrade must succeed for read lock");
        }
        l.upgrade_to_writer();
        for(size_t i = r.begin(); i != r.end(); ++i) {
            ASSERT(l.upgrade_to_writer(), "Upgrade must succeed for write lock");
        }
    }
};

template<typename M>
void TestRWStateMultipleChange() {
    ASSERT(M::is_rw_mutex, "Incorrect mutex type");
    size_t n = 10000;
    M mutex;
    RWStateMultipleChangeBody<M> body(mutex);
    tbb::parallel_for(tbb::blocked_range<size_t>(0, n, n/10), body);
}

int TestMain () {
    for( int p=MinThread; p<=MaxThread; ++p ) {
        tbb::task_scheduler_init init( p );
        REMARK( "testing with %d workers\n", static_cast<int>(p) );
#if TBB_TEST_LOW_WORKLOAD
        // The amount of work is decreased in this mode to bring the length
        // of the runs under tools into the tolerable limits.
        const int n = 1;
#else
        const int n = 3;
#endif
        // Run each test several times.
        for( int i=0; i<n; ++i ) {
            TestNullMutex<tbb::null_mutex>( "Null Mutex" );
            TestNullMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
            TestNullRWMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
            Test<tbb::spin_mutex>( "Spin Mutex" );
            Test<tbb::speculative_spin_mutex>( "Spin Mutex/speculative" );
#if _OPENMP
            Test<OpenMP_Mutex>( "OpenMP_Mutex" );
#endif /* _OPENMP */
            Test<tbb::queuing_mutex>( "Queuing Mutex" );
            Test<tbb::mutex>( "Wrapper Mutex" );
            Test<tbb::recursive_mutex>( "Recursive Mutex" );
            Test<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
            Test<tbb::spin_rw_mutex>( "Spin RW Mutex" );
            Test<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" );

            TestTryAcquire_OneThread<tbb::spin_mutex>("Spin Mutex");
            TestTryAcquire_OneThread<tbb::speculative_spin_mutex>("Spin Mutex/speculative");
            TestTryAcquire_OneThread<tbb::queuing_mutex>("Queuing Mutex");
#if USE_PTHREAD
            // under ifdef because on Windows tbb::mutex is reentrant and the test would fail
            TestTryAcquire_OneThread<tbb::mutex>("Wrapper Mutex");
#endif /* USE_PTHREAD */
            TestTryAcquire_OneThread<tbb::recursive_mutex>( "Recursive Mutex" );
            TestTryAcquire_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); // only tests try_acquire for writers
            TestTryAcquire_OneThread<tbb::speculative_spin_rw_mutex>("Spin RW Mutex/speculative"); // only tests try_acquire for writers
            TestTryAcquire_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); // only tests try_acquire for writers

            TestTryAcquireReader_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex");
            TestTryAcquireReader_OneThread<tbb::speculative_spin_rw_mutex>("Spin RW Mutex/speculative");
            TestTryAcquireReader_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex");

            TestReaderWriterLock<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
            TestReaderWriterLock<tbb::spin_rw_mutex>( "Spin RW Mutex" );
            TestReaderWriterLock<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" );

            TestRecursiveMutex<tbb::recursive_mutex>( "Recursive Mutex" );

            // Test ISO C++11 interface
            TestISO<tbb::spin_mutex>( "ISO Spin Mutex" );
            TestISO<tbb::mutex>( "ISO Mutex" );
            TestISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
            TestISO<tbb::critical_section>( "ISO Critical Section" );
            TestTryAcquire_OneThreadISO<tbb::spin_mutex>( "ISO Spin Mutex" );
#if USE_PTHREAD
            // under ifdef because on Windows tbb::mutex is reentrant and the test would fail
            TestTryAcquire_OneThreadISO<tbb::mutex>( "ISO Mutex" );
#endif /* USE_PTHREAD */
            TestTryAcquire_OneThreadISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestTryAcquire_OneThreadISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
            TestTryAcquire_OneThreadISO<tbb::critical_section>( "ISO Critical Section" );
            TestReaderWriterLockISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestRecursiveMutexISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );

            TestRWStateMultipleChange<tbb::spin_rw_mutex>();
            TestRWStateMultipleChange<tbb::speculative_spin_rw_mutex>();
            TestRWStateMultipleChange<tbb::queuing_rw_mutex>();
        }
    }

#if __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER
    // additional test for speculative mutexes to see if we actually attempt lock elision
    if( have_TSX() ) {
        tbb::task_scheduler_init init( MaxThread );
        TestTransaction<tbb::speculative_spin_mutex>( "Spin Mutex/speculative" );
        TestTransaction<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" );
    }
    else {
        REMARK("Hardware transactions not supported\n");
    }
#endif
    return Harness::Done;
}