/*
    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See  the GNU General Public License for more details.   You should have received a copy of
    the  GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA

    As a special exception,  you may use this file  as part of a free software library without
    restriction.  Specifically,  if other files instantiate templates  or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable,  this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef _TBB_mailbox_H
#define _TBB_mailbox_H

#include "tbb/tbb_stddef.h"
#include "tbb/cache_aligned_allocator.h"

#include "scheduler_common.h"
#include "tbb/atomic.h"

namespace tbb {
namespace internal {

struct task_proxy : public task {
    static const intptr_t      pool_bit = 1<<0;
    static const intptr_t   mailbox_bit = 1<<1;
    static const intptr_t location_mask = pool_bit | mailbox_bit;
    /* All but two low-order bits represent a (task*).
       Two low-order bits mean:
       1 = proxy is/was/will be in task pool
       2 = proxy is/was/will be in mailbox */
    intptr_t task_and_tag;
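
    // For illustration (a sketch of the intended encoding, not a quote of the
    // scheduler code): a proxy placed both in the sender's task pool and in
    // the recipient's mailbox would presumably be initialized as
    //     task_and_tag = intptr_t(&t) | location_mask;
    // whereas an "empty" proxy, whose task has already been claimed, carries
    // only a single location bit and no task pointer bits.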

    //! Pointer to next task_proxy in a mailbox
    task_proxy* __TBB_atomic next_in_mailbox;

    //! Mailbox to which this was mailed.
    mail_outbox* outbox;

    //! True if the proxy is stored both in its sender's pool and in the destination mailbox.
    static bool is_shared ( intptr_t tat ) {
        return (tat & location_mask) == location_mask;
    }

    //! Returns a pointer to the encapsulated task or NULL.
    static task* task_ptr ( intptr_t tat ) {
        return (task*)(tat & ~location_mask);
    }
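
    // Claiming protocol: a shared proxy can be found twice, once by the owner
    // scanning its task pool and once by the recipient scanning its mailbox.
    // Each finder calls extract_task with the bit naming the location where it
    // found the proxy (presumably extract_task<pool_bit>() from the pool and
    // extract_task<mailbox_bit>() from the mailbox). The CAS inside guarantees
    // that exactly one caller receives the task; the winner leaves behind only
    // the other location's bit, designating that location's finder as the one
    // responsible for freeing the proxy.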

    //! Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary.
    template<intptr_t from_bit>
    inline task* extract_task () {
        __TBB_ASSERT( prefix().extra_state == es_task_proxy, "Normal task misinterpreted as a proxy?" );
        intptr_t tat = __TBB_load_with_acquire(task_and_tag);
        __TBB_ASSERT( tat == from_bit || (is_shared(tat) && task_ptr(tat)),
            "Proxy's tag cannot specify both locations if the proxy "
            "was retrieved from one of its original locations" );
        if ( tat != from_bit ) {
            const intptr_t cleaner_bit = location_mask & ~from_bit;
            // Attempt to transition the proxy to the "empty" state with
            // cleaner_bit specifying the entity responsible for its eventual freeing.
            if ( as_atomic(task_and_tag).compare_and_swap(cleaner_bit, tat) == tat ) {
                // Successfully grabbed the task, leaving the other location's finder
                // with the job of freeing the proxy.
                return task_ptr(tat);
            }
        }
        // The proxied task has already been claimed from the other proxy location.
        __TBB_ASSERT( task_and_tag == from_bit, "Empty proxy cannot contain non-zero task pointer" );
        poison_pointer(outbox);
        poison_pointer(next_in_mailbox);
        poison_value(task_and_tag);
        return NULL;
    }
}; // struct task_proxy

//! Internal representation of mail_outbox, without padding.
class unpadded_mail_outbox {
protected:
    typedef task_proxy* __TBB_atomic proxy_ptr;

    //! Pointer to first task_proxy in mailbox, or NULL if box is empty.
    proxy_ptr my_first;

    //! Pointer to pointer that will point to next item in the queue.  Never NULL.
    proxy_ptr* __TBB_atomic my_last;
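
    // Queue invariant: my_last == &my_first while the mailbox is empty;
    // otherwise it points to the next_in_mailbox field of the last proxy.
    // (The usual intrusive multi-producer queue layout; construct() below
    // establishes the empty state.)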

    //! Owner of mailbox is not executing a task, and has drained its own task pool.
    bool my_is_idle;
};

//! Class representing where mail is put.
/** Padded to occupy a cache line. */
class mail_outbox : padded<unpadded_mail_outbox> {

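    //! Get the next task_proxy, or NULL if the mailbox is empty.
    /** Presumably reached only via mail_inbox::pop(), i.e. by the single
        thread that receives from this mailbox; that is why my_first can be
        read and updated with plain loads and stores, and only the
        one-item-to-empty transition needs a CAS against concurrent pushers
        racing on my_last. */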
    task_proxy* internal_pop() {
        task_proxy* const first = __TBB_load_relaxed(my_first);
        if( !first )
            return NULL;
        __TBB_control_consistency_helper(); // on my_first
        // There is a first item in the mailbox.  See if there is a second.
        if( task_proxy* second = first->next_in_mailbox ) {
            // There are at least two items, so first item can be popped easily.
            my_first = second;
        } else {
            // There is only one item.  Some care is required to pop it.
            my_first = NULL;
            if( as_atomic(my_last).compare_and_swap(&my_first,&first->next_in_mailbox) == &first->next_in_mailbox )
            {
                // Successfully transitioned mailbox from having one item to having none.
                __TBB_ASSERT(!first->next_in_mailbox,NULL);
            } else {
                // Some other thread updated my_last but has not filled in first->next_in_mailbox.
                // Wait until the first item points to the second item.
                atomic_backoff backoff;
                while( !(second = first->next_in_mailbox) ) backoff.pause();
                my_first = second;
            }
        }
        return first;
    }
public:
    friend class mail_inbox;

    //! Push task_proxy onto the mailbox queue of another thread.
    /** Implementation is wait-free. */
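    // Protocol sketch: the fetch-and-store on my_last claims a slot at the
    // tail of the queue (this exchange is the linearization point), and the
    // subsequent relaxed store links the new proxy into its predecessor.
    // A consumer that observes the tail advanced before the link is written
    // spins in the backoff loop of internal_pop() above.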
    void push( task_proxy& t ) {
        __TBB_ASSERT(&t, NULL);
        t.next_in_mailbox = NULL;
        proxy_ptr * const link = (proxy_ptr *)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t.next_in_mailbox);
        // No release fence required for the next store, because there are no memory operations
        // between the previous fully fenced atomic operation and the store.
        __TBB_store_relaxed(*link, &t);
    }

    //! Return true if mailbox is empty
    bool empty() {
        return __TBB_load_relaxed(my_first) == NULL;
    }

    //! Construct *this as a mailbox from zeroed memory.
    /** Raise assertion if *this is not previously zeroed, or sizeof(*this) is wrong.
        This method is provided instead of a full constructor since we know the object
        will be constructed in zeroed memory. */
    void construct() {
        __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL );
        __TBB_ASSERT( !my_first, NULL );
        __TBB_ASSERT( !my_last, NULL );
        __TBB_ASSERT( !my_is_idle, NULL );
        my_last=&my_first;
        suppress_unused_warning(pad);
    }

    //! Drain the mailbox
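    /** Presumably called only during shutdown, after all other threads have
        quiesced; it frees any proxies that were mailed but never executed and
        returns how many there were. The NFS_Free offset assumes each proxy was
        allocated with task_prefix_reservation_size bytes of task prefix in
        front of it, matching the scheduler's task allocation layout. */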
    intptr_t drain() {
        intptr_t k = 0;
        // No fences here because other threads have already quit.
        for( ; task_proxy* t = my_first; ++k ) {
            my_first = t->next_in_mailbox;
            NFS_Free((char*)t - task_prefix_reservation_size);
        }
        return k;
    }

    //! True if thread that owns this mailbox is looking for work.
    bool recipient_is_idle() {
        return my_is_idle;
    }
}; // class mail_outbox

//! Class representing source of mail.
class mail_inbox {
    //! Corresponding sink where mail that we receive will be put.
    mail_outbox* my_putter;
public:
    //! Construct unattached inbox
    mail_inbox() : my_putter(NULL) {}

    //! Attach inbox to a corresponding outbox.
    void attach( mail_outbox& putter ) {
        __TBB_ASSERT(!my_putter,"already attached");
        my_putter = &putter;
    }
    //! Detach inbox from its outbox
    void detach() {
        __TBB_ASSERT(my_putter,"not attached");
        my_putter = NULL;
    }
    //! Get next piece of mail, or NULL if mailbox is empty.
    task_proxy* pop() {
        return my_putter->internal_pop();
    }
    //! Return true if mailbox is empty
    bool empty() {
        return my_putter->empty();
    }
    //! Indicate whether thread that reads this mailbox is idle.
    /** Raises assertion failure if mailbox is redundantly marked as not idle. */
    void set_is_idle( bool value ) {
        if( my_putter ) {
            __TBB_ASSERT( my_putter->my_is_idle || value, "attempt to redundantly mark mailbox as not idle" );
            my_putter->my_is_idle = value;
        }
    }
    //! Check whether the idleness flag of this mailbox matches the given value.
    bool is_idle_state ( bool value ) const {
        return !my_putter || my_putter->my_is_idle == value;
    }

#if DO_ITT_NOTIFY
    //! Get pointer to corresponding outbox used for ITT_NOTIFY calls.
    void* outbox() const {return my_putter;}
#endif /* DO_ITT_NOTIFY */
}; // class mail_inbox
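
// Minimal usage sketch (hypothetical setup; the real call sites live in the
// scheduler sources, where the arena is assumed to own one mail_outbox per slot):
//
//     mail_outbox outbox;                  // must start out as zeroed memory
//     outbox.construct();
//     mail_inbox inbox;
//     inbox.attach( outbox );              // recipient side
//     ...
//     outbox.push( proxy );                // sender mails a task_proxy
//     ...
//     if ( task_proxy* tp = inbox.pop() )  // recipient polls for mail
//         if ( task* t = tp->extract_task<task_proxy::mailbox_bit>() )
//             ; // run t; otherwise tp was already claimed via the task pool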

} // namespace internal
} // namespace tbb

#endif /* _TBB_mailbox_H */