/*
    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See  the GNU General Public License for more details.   You should have received a copy of
    the  GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA

    As a special exception,  you may use this file  as part of a free software library without
    restriction.  Specifically,  if other files instantiate templates  or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable,  this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include "tbb_config.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <cstring>

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "atomic.h"
#include "tbb_profiling.h"

namespace tbb {

//! Queuing mutex with local-only spinning.
/** @ingroup synchronization */
class queuing_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired mutex.
    queuing_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release the lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            mutex = NULL;
#if TBB_USE_ASSERT
            internal::poison_pointer(next);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        scoped_lock( queuing_mutex& m ) {
            initialize();
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( mutex ) release();
        }

        //! Acquire lock on given mutex.
        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );

        //! Release lock.
        void __TBB_EXPORTED_METHOD release();

    private:
        //! The pointer to the mutex owned, or NULL if not holding a mutex.
        queuing_mutex* mutex;

        //! The pointer to the next competitor for a mutex
        scoped_lock *next;

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization.  Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        uintptr_t going;
    };

    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    atomic<scoped_lock*> q_tail;

};
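
// Usage sketch (illustrative comment only, not part of this header): the
// scoped_lock acquires the mutex in its constructor and releases it in its
// destructor, so the lock cannot be leaked on early returns or exceptions.
// The names my_mutex, my_counter, and safe_increment are hypothetical.
//
//     tbb::queuing_mutex my_mutex;
//     long my_counter = 0;
//
//     void safe_increment() {
//         tbb::queuing_mutex::scoped_lock lock( my_mutex ); // blocks until the mutex is granted
//         ++my_counter;                                     // protected region
//     }   // lock released automatically by ~scoped_lock()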

__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)
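
// Non-blocking variant, also an illustrative sketch: a default-constructed
// scoped_lock holds no mutex, and try_acquire() returns immediately instead
// of waiting. Threads that do block are granted the mutex in FIFO order
// (is_fair_mutex is true). The name try_increment is hypothetical.
//
//     bool try_increment( tbb::queuing_mutex& m, long& counter ) {
//         tbb::queuing_mutex::scoped_lock lock;  // no mutex held yet
//         if( !lock.try_acquire(m) )
//             return false;                      // mutex was busy; nothing to release
//         ++counter;
//         return true;                           // released when lock goes out of scope
//     }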

} // namespace tbb

#endif /* __TBB_queuing_mutex_H */