/*
    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See  the GNU General Public License for more details.   You should have received a copy of
    the  GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA

    As a special exception,  you may use this file  as part of a free software library without
    restriction.  Specifically,  if other files instantiate templates  or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable,  this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_concurrent_monitor_H
#define __TBB_concurrent_monitor_H

#include "tbb/tbb_stddef.h"
#include "tbb/atomic.h"
#include "tbb/spin_mutex.h"
#include "tbb/tbb_exception.h"
#include "tbb/aligned_space.h"

#include "semaphore.h"

namespace tbb {
namespace internal {

//! Circular doubly-linked list with sentinel
/** head.next points to the front and head.prev points to the back */
class circular_doubly_linked_list_with_sentinel : no_copy {
public:
    struct node_t {
        node_t* next;
        node_t* prev;
        //! Links are poisoned so that use of an unlinked node is easy to spot in a debugger.
        explicit node_t() : next((node_t*)(uintptr_t)0xcdcdcdcd), prev((node_t*)(uintptr_t)0xcdcdcdcd) {}
    };

    // ctor
    circular_doubly_linked_list_with_sentinel() {clear();}
    // dtor
    ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, "the list is not empty" );}

    inline size_t  size()  const {return count;}
    inline bool    empty() const {return size()==0;}
    inline node_t* front() const {return head.next;}
    inline node_t* last()  const {return head.prev;}
    inline node_t* begin() const {return front();}
    inline const node_t* end() const {return &head;}

    //! add to the back of the list
    inline void add( node_t* n ) {
        __TBB_store_relaxed(count, __TBB_load_relaxed(count) + 1);
        n->prev = head.prev;
        n->next = &head;
        head.prev->next = n;
        head.prev = n;
    }

    //! remove node 'n'
    inline void remove( node_t& n ) {
        __TBB_store_relaxed(count, __TBB_load_relaxed(count) - 1);
        n.prev->next = n.next;
        n.next->prev = n.prev;
    }

    //! move all elements to 'lst' and initialize the 'this' list
    inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) {
        if( const size_t l_count = __TBB_load_relaxed(count) ) {
            __TBB_store_relaxed(lst.count, l_count);
            lst.head.next = head.next;
            lst.head.prev = head.prev;
            head.next->prev = &lst.head;
            head.prev->next = &lst.head;
            clear();
        }
    }

    void clear() {head.next = head.prev = &head; __TBB_store_relaxed(count, 0);}
private:
    __TBB_atomic size_t count; //!< number of nodes currently in the list
    node_t head;               //!< sentinel; head.next==head.prev==&head when empty
};

typedef circular_doubly_linked_list_with_sentinel waitset_t;
typedef circular_doubly_linked_list_with_sentinel dllist_t;
typedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t;
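
//! Usage sketch for the intrusive list (illustrative only; not part of the library).
/** Nodes are not owned by the list: the caller keeps them alive until removal.
    @code
    dllist_t list, batch;
    dllist_t::node_t a, b;
    list.add( &a );          // list: a
    list.add( &b );          // list: a, b
    list.remove( a );        // list: b
    list.flush_to( batch );  // batch takes over b; list becomes empty
    batch.clear();           // detach batch from its nodes before destruction
    @endcode */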

//! concurrent_monitor
/** fine-grained concurrent_monitor implementation */
class concurrent_monitor : no_copy {
public:
    /** per-thread descriptor for concurrent_monitor */
    class thread_context : waitset_node_t, no_copy {
        friend class concurrent_monitor;
    public:
        thread_context() : spurious(false), aborted(false), ready(false), context(0) {
            epoch = 0;
            in_waitset = false;
        }
        ~thread_context() {
            if (ready) {
                if( spurious ) semaphore().P();
                semaphore().~binary_semaphore();
            }
        }
        binary_semaphore& semaphore() { return *sema.begin(); }
    private:
        //! The method for lazy initialization of the thread_context's semaphore.
        //  Inlining of the method is undesirable, due to extra instructions for
        //  exception support added at caller side.
        __TBB_NOINLINE( void init() );
        tbb::aligned_space<binary_semaphore> sema;
        __TBB_atomic unsigned epoch;  //!< monitor epoch observed at prepare_wait()
        tbb::atomic<bool> in_waitset; //!< true while the node is linked in the wait set
        bool  spurious;               //!< a late V() is pending and must be drained before destruction
        bool  aborted;                //!< the wait was ended by abort_all()
        bool  ready;                  //!< the semaphore has been lazily constructed
        uintptr_t context;            //!< user-supplied context, tested by notify(predicate)
    };

    //! ctor
    concurrent_monitor() {__TBB_store_relaxed(epoch, 0);}

    //! dtor
    ~concurrent_monitor();

    //! prepare wait by inserting 'thr' into the wait queue
    void prepare_wait( thread_context& thr, uintptr_t ctx = 0 );

    //! Commit wait if event count has not changed; otherwise, cancel wait.
    /** Returns true if committed, false if canceled. */
    inline bool commit_wait( thread_context& thr ) {
        const bool do_it = thr.epoch == __TBB_load_relaxed(epoch);
        // this check is just an optimization
        if( do_it ) {
            __TBB_ASSERT( thr.ready, "use of commit_wait() without prior prepare_wait()");
            thr.semaphore().P();
            __TBB_ASSERT( !thr.in_waitset, "still in the queue?" );
            if( thr.aborted )
                throw_exception( eid_user_abort );
        } else {
            cancel_wait( thr );
        }
        return do_it;
    }
    //! Cancel the wait. Removes the thread from the wait queue if not removed yet.
    void cancel_wait( thread_context& thr );

    //! Wait for a condition to be satisfied with waiting-on context
    template<typename WaitUntil, typename Context>
    void wait( WaitUntil until, Context on );

    //! Notify one thread about the event
    void notify_one() {atomic_fence(); notify_one_relaxed();}

    //! Notify one thread about the event. Relaxed version.
    void notify_one_relaxed();

    //! Notify all waiting threads of the event
    void notify_all() {atomic_fence(); notify_all_relaxed();}

    //! Notify all waiting threads of the event; Relaxed version
    void notify_all_relaxed();

    //! Notify waiting threads of the event that satisfies the given predicate
    template<typename P> void notify( const P& predicate ) {atomic_fence(); notify_relaxed( predicate );}

    //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version
    template<typename P> void notify_relaxed( const P& predicate );

    //! Abort any sleeping threads at the time of the call
    void abort_all() {atomic_fence(); abort_all_relaxed();}

    //! Abort any sleeping threads at the time of the call; Relaxed version
    void abort_all_relaxed();

private:
    tbb::spin_mutex mutex_ec;    //!< protects the wait set and the epoch counter
    waitset_t       waitset_ec;  //!< thread_context's of the threads blocked on this monitor
    __TBB_atomic unsigned epoch; //!< bumped by every notification
    thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); }
};
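
//! Two-phase wait protocol (illustrative sketch; condition_is_met() and
//! make_condition_true() are placeholders, not part of this header).
/** @code
    // waiter:
    concurrent_monitor::thread_context ctx;
    monitor.prepare_wait( ctx );
    if( !condition_is_met() )        // re-check after announcing intent to sleep
        monitor.commit_wait( ctx );  // blocks unless a notification intervened
    else
        monitor.cancel_wait( ctx );

    // notifier:
    make_condition_true();
    monitor.notify_one();            // full fence, then wake one waiter
    @endcode */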

template<typename WaitUntil, typename Context>
void concurrent_monitor::wait( WaitUntil until, Context on )
{
    bool slept = false;
    thread_context thr_ctx;
    prepare_wait( thr_ctx, on() );
    while( !until() ) {
        slept = commit_wait( thr_ctx );
        if( slept && until() ) break;
        slept = false;
        prepare_wait( thr_ctx, on() );
    }
    // the last prepare_wait() was never committed, so unlink thr_ctx from the wait set
    if( !slept )
        cancel_wait( thr_ctx );
}

template<typename P>
void concurrent_monitor::notify_relaxed( const P& predicate ) {
    if( waitset_ec.empty() )
        return;
    dllist_t temp;
    waitset_node_t* nxt;
    const waitset_node_t* end = waitset_ec.end();
    {
        tbb::spin_mutex::scoped_lock l( mutex_ec );
        __TBB_store_relaxed(epoch, __TBB_load_relaxed(epoch) + 1);
        // move the matching waiters onto a local list while holding the lock...
        for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) {
            nxt = n->prev;
            thread_context* thr = to_thread_context( n );
            if( predicate( thr->context ) ) {
                waitset_ec.remove( *n );
                thr->in_waitset = false;
                temp.add( n );
            }
        }
    }

    // ...then wake them up outside the lock
    end = temp.end();
    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
        nxt = n->next;
        to_thread_context(n)->semaphore().V();
    }
#if TBB_USE_ASSERT
    temp.clear();
#endif
}
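
//! Example predicate for notify()/notify_relaxed() (illustrative only):
//! wakes the waiters that passed a matching 'ctx' to prepare_wait().
/** @code
    struct context_equals {
        uintptr_t target;
        explicit context_equals( uintptr_t t ) : target(t) {}
        bool operator()( uintptr_t ctx ) const { return ctx==target; }
    };
    // monitor.notify( context_equals( tag ) ); // 'tag' is a placeholder value
    @endcode */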

} // namespace internal
} // namespace tbb

#endif /* __TBB_concurrent_monitor_H */