1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
6 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
7 
#include <stdint.h>

#include <functional>
#include <memory>
#include <queue>
#include <tuple>
#include <unordered_map>
#include <vector>

#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"
29 
30 namespace base {
31 class SingleThreadTaskRunner;
32 }  // namespace base
33 
34 namespace gpu {
35 
36 class SyncPointClient;
37 class SyncPointClientState;
38 class SyncPointManager;
39 
40 class GPU_EXPORT SyncPointOrderData
41     : public base::RefCountedThreadSafe<SyncPointOrderData> {
42  public:
43   void Destroy();
44 
sequence_id()45   SequenceId sequence_id() { return sequence_id_; }
46 
processed_order_num()47   uint32_t processed_order_num() const {
48     base::AutoLock auto_lock(lock_);
49     return processed_order_num_;
50   }
51 
unprocessed_order_num()52   uint32_t unprocessed_order_num() const {
53     base::AutoLock auto_lock(lock_);
54     return last_unprocessed_order_num_;
55   }
56 
current_order_num()57   uint32_t current_order_num() const {
58     DCHECK(processing_thread_checker_.CalledOnValidThread());
59     return current_order_num_;
60   }
61 
IsProcessingOrderNumber()62   bool IsProcessingOrderNumber() {
63     DCHECK(processing_thread_checker_.CalledOnValidThread());
64     return !paused_ && current_order_num_ > processed_order_num();
65   }
66 
67   uint32_t GenerateUnprocessedOrderNumber();
68   void BeginProcessingOrderNumber(uint32_t order_num);
69   void PauseProcessingOrderNumber(uint32_t order_num);
70   void FinishProcessingOrderNumber(uint32_t order_num);
71 
72  private:
73   friend class base::RefCountedThreadSafe<SyncPointOrderData>;
74   friend class SyncPointManager;
75   friend class SyncPointClientState;
76 
77   struct OrderFence {
78     uint32_t order_num;
79     uint64_t fence_release;
80     scoped_refptr<SyncPointClientState> client_state;
81 
82     // ID that is unique to the particular SyncPointOrderData.
83     uint64_t callback_id;
84 
85     OrderFence(uint32_t order,
86                uint64_t release,
87                scoped_refptr<SyncPointClientState> state,
88                uint64_t callback_id);
89     OrderFence(const OrderFence& other);
90     ~OrderFence();
91 
92     bool operator>(const OrderFence& rhs) const {
93       return std::tie(order_num, fence_release) >
94              std::tie(rhs.order_num, rhs.fence_release);
95     }
96   };
97   typedef std::priority_queue<OrderFence,
98                               std::vector<OrderFence>,
99                               std::greater<OrderFence>>
100       OrderFenceQueue;
101 
102   SyncPointOrderData(SyncPointManager* sync_point_manager,
103                      SequenceId seqeunce_id);
104 
105   ~SyncPointOrderData();
106 
107   // Returns callback_id for created OrderFence on success, 0 on failure.
108   uint64_t ValidateReleaseOrderNumber(
109       scoped_refptr<SyncPointClientState> client_state,
110       uint32_t wait_order_num,
111       uint64_t fence_release);
112 
113   SyncPointManager* const sync_point_manager_;
114 
115   const SequenceId sequence_id_;
116 
117   uint64_t current_callback_id_ = 0;
118 
119   // Non thread-safe functions need to be called from a single thread.
120   base::ThreadChecker processing_thread_checker_;
121 
122   // Current IPC order number being processed (only used on processing thread).
123   uint32_t current_order_num_ = 0;
124 
125   // Whether or not the current order number is being processed or paused.
126   bool paused_ = false;
127 
128   // This lock protects destroyed_, processed_order_num_,
129   // unprocessed_order_nums_, and order_fence_queue_.
130   mutable base::Lock lock_;
131 
132   bool destroyed_ = false;
133 
134   // Last finished IPC order number.
135   uint32_t processed_order_num_ = 0;
136 
137   // Last unprocessed order number. Updated in GenerateUnprocessedOrderNumber.
138   uint32_t last_unprocessed_order_num_ = 0;
139 
140   // Queue of unprocessed order numbers. Order numbers are enqueued in
141   // GenerateUnprocessedOrderNumber, and dequeued in
142   // FinishProcessingOrderNumber.
143   std::queue<uint32_t> unprocessed_order_nums_;
144 
145   // In situations where we are waiting on fence syncs that do not exist, we
146   // validate by making sure the order number does not pass the order number
147   // which the wait command was issued. If the order number reaches the
148   // wait command's, we should automatically release up to the expected
149   // release count. Note that this also releases other lower release counts,
150   // so a single misbehaved fence sync is enough to invalidate/signal all
151   // previous fence syncs. All order numbers (n) in order_fence_queue_ must
152   // follow the invariant:
153   //   unprocessed_order_nums_.front() < n <= unprocessed_order_nums_.back().
154   OrderFenceQueue order_fence_queue_;
155 
156   DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData);
157 };
158 
159 class GPU_EXPORT SyncPointClientState
160     : public base::RefCountedThreadSafe<SyncPointClientState> {
161  public:
162   void Destroy();
163 
namespace_id()164   CommandBufferNamespace namespace_id() const { return namespace_id_; }
command_buffer_id()165   CommandBufferId command_buffer_id() const { return command_buffer_id_; }
sequence_id()166   SequenceId sequence_id() const { return order_data_->sequence_id(); }
167 
168   // This behaves similarly to SyncPointManager::Wait but uses the order data
169   // to guarantee no deadlocks with other clients. Must be called on order
170   // number processing thread.
171   bool Wait(const SyncToken& sync_token, base::OnceClosure callback);
172 
173   // Like Wait but runs the callback on the given task runner's thread. Must be
174   // called on order number processing thread.
175   bool WaitNonThreadSafe(
176       const SyncToken& sync_token,
177       scoped_refptr<base::SingleThreadTaskRunner> task_runner,
178       base::OnceClosure callback);
179 
180   // Release fence sync and run queued callbacks. Must be called on order number
181   // processing thread.
182   void ReleaseFenceSync(uint64_t release);
183 
184  private:
185   friend class base::RefCountedThreadSafe<SyncPointClientState>;
186   friend class SyncPointManager;
187   friend class SyncPointOrderData;
188 
189   struct ReleaseCallback {
190     uint64_t release_count;
191     base::OnceClosure callback_closure;
192     uint64_t callback_id;
193 
194     ReleaseCallback(uint64_t release,
195                     base::OnceClosure callback,
196                     uint64_t callback_id);
197     ReleaseCallback(ReleaseCallback&& other);
198     ~ReleaseCallback();
199 
200     ReleaseCallback& operator=(ReleaseCallback&& other) = default;
201 
202     bool operator>(const ReleaseCallback& rhs) const {
203       return release_count > rhs.release_count;
204     }
205   };
206   typedef std::priority_queue<ReleaseCallback,
207                               std::vector<ReleaseCallback>,
208                               std::greater<ReleaseCallback>>
209       ReleaseCallbackQueue;
210 
211   SyncPointClientState(SyncPointManager* sync_point_manager,
212                        scoped_refptr<SyncPointOrderData> order_data,
213                        CommandBufferNamespace namespace_id,
214                        CommandBufferId command_buffer_id);
215 
216   ~SyncPointClientState();
217 
218   // Returns true if fence sync has been released.
219   bool IsFenceSyncReleased(uint64_t release);
220 
221   // Queues the callback to be called if the release is valid. If the release
222   // is invalid this function will return False and the callback will never
223   // be called.
224   bool WaitForRelease(uint64_t release,
225                       uint32_t wait_order_num,
226                       base::OnceClosure callback);
227 
228   // Does not release the fence sync, but releases callbacks waiting on that
229   // fence sync.
230   void EnsureWaitReleased(uint64_t release, uint64_t callback_id);
231 
232   void ReleaseFenceSyncHelper(uint64_t release);
233 
234   // Sync point manager is guaranteed to exist in the lifetime of the client.
235   SyncPointManager* sync_point_manager_ = nullptr;
236 
237   // Global order data where releases will originate from.
238   scoped_refptr<SyncPointOrderData> order_data_;
239 
240   // Unique namespace/client id pair for this sync point client.
241   const CommandBufferNamespace namespace_id_;
242   const CommandBufferId command_buffer_id_;
243 
244   // Protects fence_sync_release_, fence_callback_queue_.
245   base::Lock fence_sync_lock_;
246 
247   // Current fence sync release that has been signaled.
248   uint64_t fence_sync_release_ = 0;
249 
250   // In well defined fence sync operations, fence syncs are released in order
251   // so simply having a priority queue for callbacks is enough.
252   ReleaseCallbackQueue release_callback_queue_;
253 
254   DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
255 };
256 
257 // This class manages the sync points, which allow cross-channel
258 // synchronization.
259 class GPU_EXPORT SyncPointManager {
260  public:
261   SyncPointManager();
262   ~SyncPointManager();
263 
264   scoped_refptr<SyncPointOrderData> CreateSyncPointOrderData();
265 
266   scoped_refptr<SyncPointClientState> CreateSyncPointClientState(
267       CommandBufferNamespace namespace_id,
268       CommandBufferId command_buffer_id,
269       SequenceId sequence_id);
270 
271   // Returns true if the sync token has been released or if the command
272   // buffer does not exist.
273   bool IsSyncTokenReleased(const SyncToken& sync_token);
274 
275   // Returns the sequence ID that will release this sync token.
276   SequenceId GetSyncTokenReleaseSequenceId(const SyncToken& sync_token);
277 
278   // Returns the global last processed order number.
279   uint32_t GetProcessedOrderNum() const;
280 
281   // // Returns the global last unprocessed order number.
282   uint32_t GetUnprocessedOrderNum() const;
283 
284   // If the wait is valid (sync token hasn't been processed or command buffer
285   // does not exist), the callback is queued to run when the sync point is
286   // released. If the wait is invalid, the callback is NOT run. The callback
287   // runs on the thread the sync point is released. Clients should use
288   // SyncPointClient::Wait because that uses order data to prevent deadlocks.
289   bool Wait(const SyncToken& sync_token,
290             SequenceId sequence_id,
291             uint32_t wait_order_num,
292             base::OnceClosure callback);
293 
294   // Like Wait but runs the callback on the given task runner's thread.
295   bool WaitNonThreadSafe(
296       const SyncToken& sync_token,
297       SequenceId sequence_id,
298       uint32_t wait_order_num,
299       scoped_refptr<base::SingleThreadTaskRunner> task_runner,
300       base::OnceClosure callback);
301 
302   // WaitOutOfOrder allows waiting for a sync token indefinitely, so it
303   // should be used with trusted sync tokens only.
304   bool WaitOutOfOrder(const SyncToken& trusted_sync_token,
305                       base::OnceClosure callback);
306 
307   // Used by SyncPointOrderData.
308   uint32_t GenerateOrderNumber();
309 
310   void DestroyedSyncPointOrderData(SequenceId sequence_id);
311 
312   void DestroyedSyncPointClientState(CommandBufferNamespace namespace_id,
313                                      CommandBufferId command_buffer_id);
314 
315  private:
316   using ClientStateMap = std::unordered_map<CommandBufferId,
317                                             scoped_refptr<SyncPointClientState>,
318                                             CommandBufferId::Hasher>;
319 
320   using OrderDataMap = std::unordered_map<SequenceId,
321                                           scoped_refptr<SyncPointOrderData>,
322                                           SequenceId::Hasher>;
323 
324   scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(
325       SequenceId sequence_id);
326 
327   scoped_refptr<SyncPointClientState> GetSyncPointClientState(
328       CommandBufferNamespace namespace_id,
329       CommandBufferId command_buffer_id);
330 
331   // Order number is global for all clients.
332   base::AtomicSequenceNumber order_num_generator_;
333 
334   // The following are protected by |lock_|.
335   // Map of command buffer id to client state for each namespace.
336   ClientStateMap client_state_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
337 
338   // Map of sequence id to order data.
339   OrderDataMap order_data_map_;
340 
341   SequenceId::Generator sequence_id_generator_;
342 
343   mutable base::Lock lock_;
344 
345   DISALLOW_COPY_AND_ASSIGN(SyncPointManager);
346 };
347 
348 }  // namespace gpu
349 
350 #endif  // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
351