/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef _TBB_mailbox_H
#define _TBB_mailbox_H

#include "oneapi/tbb/cache_aligned_allocator.h"
#include "oneapi/tbb/detail/_small_object_pool.h"

#include "scheduler_common.h"

#include <atomic>

namespace tbb {
namespace detail {
namespace r1 {

struct task_proxy : public d1::task {
    static const intptr_t      pool_bit = 1<<0;
    static const intptr_t   mailbox_bit = 1<<1;
    static const intptr_t location_mask = pool_bit | mailbox_bit;
    /* All but two low-order bits represent a (task*).
       Two low-order bits mean:
       1 = proxy is/was/will be in task pool
       2 = proxy is/was/will be in mailbox */
    std::atomic<intptr_t> task_and_tag;
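
    /* Added commentary (not in the original header): the tag bits can share
       one word with the task pointer because tasks are allocated with
       alignment of at least 4, so the low two bits of a valid task* are zero.
       A minimal sketch of the encoding, under that alignment assumption:

           d1::task* t = ...;                                   // low 2 bits are 0
           intptr_t tat = intptr_t(t) | pool_bit | mailbox_bit; // proxy is in both places
           d1::task* back = (d1::task*)(tat & ~location_mask);  // recovers t (task_ptr)
    */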

    //! Pointer to next task_proxy in a mailbox.
    std::atomic<task_proxy*> next_in_mailbox;

    //! Mailbox to which this was mailed.
    mail_outbox* outbox;

    //! Task affinity id (destination slot) referenced by this proxy.
    d1::slot_id slot;

    d1::small_object_allocator allocator;

    //! True if the proxy is stored both in its sender's pool and in the destination mailbox.
    static bool is_shared ( intptr_t tat ) {
        return (tat & location_mask) == location_mask;
    }

    //! Returns a pointer to the encapsulated task or nullptr.
    static task* task_ptr ( intptr_t tat ) {
        return (task*)(tat & ~location_mask);
    }

    //! Returns a pointer to the encapsulated task or nullptr, and frees the proxy if necessary.
    template<intptr_t from_bit>
    inline task* extract_task () {
        // __TBB_ASSERT( prefix().extra_state == es_task_proxy, "Normal task misinterpreted as a proxy?" );
        intptr_t tat = task_and_tag.load(std::memory_order_acquire);
        __TBB_ASSERT( tat == from_bit || (is_shared(tat) && task_ptr(tat)),
            "Proxy's tag cannot specify both locations if the proxy "
            "was retrieved from one of its original locations" );
        if ( tat != from_bit ) {
            const intptr_t cleaner_bit = location_mask & ~from_bit;
            // Attempt to transition the proxy to the "empty" state, with
            // cleaner_bit specifying the entity responsible for its eventual freeing.
            if ( task_and_tag.compare_exchange_strong(tat, cleaner_bit) ) {
                // Successfully grabbed the task, leaving the other location's owner
                // with the job of freeing the proxy.
                return task_ptr(tat);
            }
        }
        // The proxied task has already been claimed from the other proxy location.
        __TBB_ASSERT( task_and_tag.load(std::memory_order_relaxed) == from_bit, "Empty proxy cannot contain non-zero task pointer" );
        return nullptr;
    }
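
    /* Added commentary (not in the original header): a shared proxy starts with
       task_and_tag == ptr|pool_bit|mailbox_bit. The first caller of extract_task
       wins the CAS, takes the task, and leaves only the *other* location's bit
       set, marking that location as responsible for freeing the proxy.
       A hedged sketch of the pool-side call (spawn_or_execute is hypothetical):

           if ( d1::task* t = proxy->extract_task<task_proxy::pool_bit>() )
               spawn_or_execute(t);  // this side won the race and owns the task
           // else: the mailbox side already claimed the task, and this side
           // only needs to dispose of the now-empty proxy.
    */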

    task* execute(d1::execution_data&) override {
        __TBB_ASSERT_RELEASE(false, nullptr);
        return nullptr;
    }
    task* cancel(d1::execution_data&) override {
        __TBB_ASSERT_RELEASE(false, nullptr);
        return nullptr;
    }
}; // struct task_proxy

//! Internal representation of mail_outbox, without padding.
class unpadded_mail_outbox {
protected:
    typedef std::atomic<task_proxy*> atomic_proxy_ptr;

    //! Pointer to first task_proxy in mailbox, or nullptr if box is empty.
    atomic_proxy_ptr my_first;

    //! Pointer to pointer that will point to next item in the queue. Never nullptr.
    std::atomic<atomic_proxy_ptr*> my_last;

    //! Owner of mailbox is not executing a task, and has drained its own task pool.
    std::atomic<bool> my_is_idle;
};
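
/* Added commentary (not in the original header): my_first/my_last form a
   multi-producer, single-consumer intrusive queue. The invariant is that
   my_last always points to the atomic slot the next push() will fill:
   initially &my_first, and afterwards &tail_proxy->next_in_mailbox.

       my_first --> proxy A --> proxy B --> nullptr
                                        ^
       my_last points to B.next_in_mailbox
*/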

// TODO: consider moving to arena slot
//! Class representing where mail is put.
/** Padded to occupy a cache line. */
class mail_outbox : padded<unpadded_mail_outbox> {

    task_proxy* internal_pop( isolation_type isolation ) {
        task_proxy* curr = my_first.load(std::memory_order_acquire);
        if ( !curr )
            return nullptr;
        atomic_proxy_ptr* prev_ptr = &my_first;
        if ( isolation != no_isolation ) {
            while ( task_accessor::isolation(*curr) != isolation ) {
                prev_ptr = &curr->next_in_mailbox;
                // The next_in_mailbox should be read with acquire to guarantee (*curr) consistency.
                curr = curr->next_in_mailbox.load(std::memory_order_acquire);
                if ( !curr )
                    return nullptr;
            }
        }
        // There is a first item in the mailbox.  See if there is a second.
        // The next_in_mailbox should be read with acquire to guarantee (*second) consistency.
        if ( task_proxy* second = curr->next_in_mailbox.load(std::memory_order_acquire) ) {
            // There are at least two items, so the first item can be popped easily.
            prev_ptr->store(second, std::memory_order_relaxed);
        } else {
            // There is only one item. Some care is required to pop it.

            prev_ptr->store(nullptr, std::memory_order_relaxed);
            atomic_proxy_ptr* expected = &curr->next_in_mailbox;
            if ( my_last.compare_exchange_strong( expected, prev_ptr ) ) {
                // Successfully transitioned mailbox from having one item to having none.
                __TBB_ASSERT( !curr->next_in_mailbox.load(std::memory_order_relaxed), nullptr );
            } else {
                // Some other thread updated my_last but has not yet filled in curr->next_in_mailbox.
                // Wait until the first item points to the second item.
                atomic_backoff backoff;
                // The next_in_mailbox should be read with acquire to guarantee (*second) consistency.
                while ( !(second = curr->next_in_mailbox.load(std::memory_order_acquire)) ) backoff.pause();
                prev_ptr->store( second, std::memory_order_relaxed );
            }
        }
        assert_pointer_valid(curr);
        return curr;
    }
public:
    friend class mail_inbox;

    //! Push task_proxy onto the mailbox queue of another thread.
    /** Implementation is wait-free. */
    void push( task_proxy* t ) {
        assert_pointer_valid(t);
        t->next_in_mailbox.store(nullptr, std::memory_order_relaxed);
        atomic_proxy_ptr* const link = my_last.exchange(&t->next_in_mailbox);
        // Logically, a release fence is not required because the exchange above provides
        // release-acquire semantics that guarantee that (*t) will be consistent when another
        // thread loads the link atomic. However, the C++11 memory model guarantees consistency
        // of (*t) only when the same atomic is used for synchronization.
        link->store(t, std::memory_order_release);
    }
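
    /* Added commentary (not in the original header): push() is wait-free since
       each producer performs exactly one exchange and one store. A possible
       interleaving of producers P1, P2 pushing t1, t2 with consumer C:

           1. P1: link1 = my_last.exchange(&t1->next_in_mailbox); // link1 == &my_first
           2. P2: link2 = my_last.exchange(&t2->next_in_mailbox); // link2 == &t1->next_in_mailbox
           3. P1: link1->store(t1, release);  // t1 becomes reachable via my_first
           4. C : pops t1, sees next_in_mailbox == nullptr, but its my_last CAS
                  fails because my_last == &t2->next_in_mailbox ...
           5. P2: link2->store(t2, release);  // ... so C spins (atomic_backoff) until here

       The atomic_backoff loop in internal_pop()'s one-item path exists exactly
       for the window between steps 2 and 5.
    */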

    //! Return true if mailbox is empty.
    bool empty() {
        return my_first.load(std::memory_order_relaxed) == nullptr;
    }

    //! Construct *this as a mailbox from zeroed memory.
    /** Raises an assertion if *this was not previously zeroed, or if sizeof(*this) is wrong.
        This method is provided instead of a full constructor since we know the object
        will be constructed in zeroed memory. */
    void construct() {
        __TBB_ASSERT( sizeof(*this)==max_nfs_size, nullptr );
        __TBB_ASSERT( !my_first.load(std::memory_order_relaxed), nullptr );
        __TBB_ASSERT( !my_last.load(std::memory_order_relaxed), nullptr );
        __TBB_ASSERT( !my_is_idle.load(std::memory_order_relaxed), nullptr );
        my_last = &my_first;
        suppress_unused_warning(pad);
    }

    //! Drain the mailbox.
    void drain() {
        // No fences here because other threads have already quit.
        for ( ; task_proxy* t = my_first.load(std::memory_order_relaxed); ) {
            my_first.store(t->next_in_mailbox.load(std::memory_order_relaxed), std::memory_order_relaxed);
            t->allocator.delete_object(t);
        }
    }

    //! True if thread that owns this mailbox is looking for work.
    bool recipient_is_idle() {
        return my_is_idle.load(std::memory_order_relaxed);
    }
}; // class mail_outbox

//! Class representing source of mail.
class mail_inbox {
    //! Corresponding sink where mail that we receive will be put.
    mail_outbox* my_putter;
public:
    //! Construct unattached inbox.
    mail_inbox() : my_putter(nullptr) {}

    //! Attach inbox to a corresponding outbox.
    void attach( mail_outbox& putter ) {
        my_putter = &putter;
    }
    //! Detach inbox from its outbox.
    void detach() {
        __TBB_ASSERT(my_putter, "not attached");
        my_putter = nullptr;
    }
    //! Get next piece of mail, or nullptr if mailbox is empty.
    task_proxy* pop( isolation_type isolation ) {
        return my_putter->internal_pop( isolation );
    }
    //! Return true if mailbox is empty.
    bool empty() {
        return my_putter->empty();
    }
    //! Indicate whether the thread that reads this mailbox is idle.
    /** Raises an assertion failure if the mailbox is redundantly marked as not idle. */
    void set_is_idle( bool value ) {
        if ( my_putter ) {
            __TBB_ASSERT( my_putter->my_is_idle.load(std::memory_order_relaxed) || value, "attempt to redundantly mark mailbox as not idle" );
            my_putter->my_is_idle.store(value, std::memory_order_relaxed);
        }
    }
    //! True if the idle state of this mailbox's reader matches value.
    bool is_idle_state ( bool value ) const {
        return !my_putter || my_putter->my_is_idle.load(std::memory_order_relaxed) == value;
    }
}; // class mail_inbox
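
/* Added commentary (not in the original header): an end-to-end usage sketch.
   The real driver is the scheduler (arena slots own the outboxes); everything
   below is a hypothetical, simplified sequence:

       mail_outbox outbox;      // in the real code this lives in zeroed arena memory
       outbox.construct();
       mail_inbox inbox;
       inbox.attach(outbox);    // done by the thread that owns the destination slot

       outbox.push(proxy);      // any thread mails an affinitized task_proxy
       if ( task_proxy* tp = inbox.pop(no_isolation) )
           if ( d1::task* t = tp->extract_task<task_proxy::mailbox_bit>() )
               ;                // execute the mailed task t

       inbox.detach();
*/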

} // namespace r1
} // namespace detail
} // namespace tbb

#endif /* _TBB_mailbox_H */