/*
    Copyright (c) 2005-2017 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include <cstring>
#include "atomic.h"
#include "tbb_profiling.h"

namespace tbb {

//! Queuing mutex with local-only spinning.
/** A fair, FIFO-ordered lock: waiting threads form a queue, and each spins
    only on its own node (in the style of an MCS lock) rather than on a
    shared flag.
    @ingroup synchronization */
class queuing_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired mutex.
    queuing_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release a lock.
        It also conveniently provides the "node" for queuing locks.
        See the usage sketch after the class definition below. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            mutex = NULL;
#if TBB_USE_ASSERT
            internal::poison_pointer(next);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        scoped_lock( queuing_mutex& m ) {
            initialize();
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( mutex ) release();
        }

        //! Acquire lock on given mutex.
        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );

        //! Acquire lock on given mutex if it is free (i.e., non-blocking).
        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );

        //! Release lock.
        void __TBB_EXPORTED_METHOD release();

    private:
        //! The pointer to the mutex owned, or NULL if not holding a mutex.
        queuing_mutex* mutex;

        //! The pointer to the next competitor for the mutex
        scoped_lock *next;

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization.  Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        uintptr_t going;
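
        // Conceptual sketch of how these fields cooperate (the actual
        // algorithm lives in the library sources, not this header; this is
        // an assumption based on the MCS-style design): acquire() is
        // expected to swap this lock into the mutex's q_tail, link itself
        // behind the previous tail via `next`, and spin on its own `going`
        // word; release() then hands the mutex to the successor by setting
        // that waiter's `going` flag. Each thread spins only on memory it
        // owns, which is what "local-only spinning" means above.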
    };

    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    atomic<scoped_lock*> q_tail;

};
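
// Usage sketch (illustrative only; `my_mutex`, `shared_count`, and the two
// functions below are hypothetical names, not part of the library). The
// scoped_lock acquires the mutex in its constructor and releases it in its
// destructor, so the critical section is exactly the lock object's scope:
//
//     tbb::queuing_mutex my_mutex;
//     int shared_count = 0;
//
//     void safe_increment() {
//         tbb::queuing_mutex::scoped_lock lock(my_mutex); // blocks until acquired
//         ++shared_count;                                 // protected section
//     }                                                   // released on scope exit
//
// try_acquire() gives a non-blocking alternative:
//
//     void try_increment() {
//         tbb::queuing_mutex::scoped_lock lock;       // holds no mutex yet
//         if( lock.try_acquire(my_mutex) )            // false if mutex is busy
//             ++shared_count;
//     }                                               // released here if acquired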

__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)

} // namespace tbb

#endif /* __TBB_queuing_mutex_H */