// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/client/command_buffer_proxy_impl.h"

#include <memory>

#include "base/bind.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/optional.h"
#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/presentation_feedback_utils.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits.h"
#include "mojo/public/cpp/system/buffer.h"
#include "mojo/public/cpp/system/platform_handle.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gpu_preference.h"

namespace gpu {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    scoped_refptr<GpuChannelHost> channel,
    GpuMemoryBufferManager* gpu_memory_buffer_manager,
    int32_t stream_id,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : channel_(std::move(channel)),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
      channel_id_(channel_->channel_id()),
      route_id_(channel_->GenerateRouteID()),
      stream_id_(stream_id),
      command_buffer_id_(
          CommandBufferIdFromChannelAndRoute(channel_id_, route_id_)),
      callback_thread_(std::move(task_runner)) {
  DCHECK(route_id_);
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  for (auto& observer : deletion_observers_)
    observer.OnWillDeleteImpl();
  DisconnectChannel();
}

ContextResult CommandBufferProxyImpl::Initialize(
    gpu::SurfaceHandle surface_handle,
    CommandBufferProxyImpl* share_group,
    gpu::SchedulingPriority stream_priority,
    const gpu::ContextCreationAttribs& attribs,
    const GURL& active_url) {
  DCHECK(!share_group || (stream_id_ == share_group->stream_id_));
  TRACE_EVENT1("gpu", "GpuChannelHost::CreateViewCommandBuffer",
               "surface_handle", surface_handle);

  // Drop |channel_| if this method fails and returns early, so that the
  // destructor does not attempt cleanup over the channel.
  auto channel = std::move(channel_);

  GPUCreateCommandBufferConfig init_params;
  init_params.surface_handle = surface_handle;
  init_params.share_group_id =
      share_group ? share_group->route_id_ : MSG_ROUTING_NONE;
  init_params.stream_id = stream_id_;
  init_params.stream_priority = stream_priority;
  init_params.attribs = attribs;
  init_params.active_url = active_url;

  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  std::tie(shared_state_shm_, shared_state_mapping_) =
      AllocateAndMapSharedMemory(sizeof(*shared_state()));
  if (!shared_state_shm_.IsValid()) {
    LOG(ERROR) << "ContextResult::kFatalFailure: "
                  "AllocateAndMapSharedMemory failed";
    return ContextResult::kFatalFailure;
  }

  shared_state()->Initialize();

  base::UnsafeSharedMemoryRegion region = shared_state_shm_.Duplicate();
  if (!region.IsValid()) {
    // TODO(piman): ShareToGpuProcess should alert if it is failing due to
    // being out of file descriptors, in which case this is a fatal error
    // that won't be recovered from.
    LOG(ERROR) << "ContextResult::kTransientFailure: "
                  "Shared memory region is not valid";
    return ContextResult::kTransientFailure;
  }

  // Route must be added before sending the message, otherwise messages sent
  // from the GPU process could race against adding ourselves to the filter.
  channel->AddRouteWithTaskRunner(route_id_, weak_ptr_factory_.GetWeakPtr(),
                                  callback_thread_);

  // We're blocking the UI thread, which is generally undesirable.
  // In this case we need to wait for this before we can show any UI /anyway/,
  // so it won't cause additional jank.
  // TODO(piman): Make this asynchronous (http://crbug.com/125248).
  ContextResult result = ContextResult::kSuccess;
  bool sent = channel->Send(new GpuChannelMsg_CreateCommandBuffer(
      init_params, route_id_, std::move(region), &result, &capabilities_));
  if (!sent) {
    channel->RemoveRoute(route_id_);
    LOG(ERROR) << "ContextResult::kTransientFailure: "
                  "Failed to send GpuChannelMsg_CreateCommandBuffer.";
    return ContextResult::kTransientFailure;
  }
  if (result != ContextResult::kSuccess) {
    DLOG(ERROR) << "Failure processing GpuChannelMsg_CreateCommandBuffer.";
    channel->RemoveRoute(route_id_);
    return result;
  }

  channel_ = std::move(channel);
  return result;
}

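// Runs on |callback_thread_|, to which Initialize() routed this object's
// messages via AddRouteWithTaskRunner(). Takes |lock_| (when one has been
// installed with SetLock()) so that handlers are serialized against calls
// made by the client on other threads.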
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  base::AutoLockMaybe lock(lock_);
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GpuSwitched, OnGpuSwitched);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, OnSignalAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
                        OnSwapBuffersCompleted);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_BufferPresented, OnBufferPresented);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GetGpuFenceHandleComplete,
                        OnGetGpuFenceHandleComplete);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ReturnData, OnReturnData);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  if (!handled) {
    LOG(ERROR) << "Gpu process sent invalid message.";
    base::AutoLock last_state_lock(last_state_lock_);
    OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
                           gpu::error::kLostContext);
  }
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  base::AutoLockMaybe lock(lock_);
  base::AutoLock last_state_lock(last_state_lock_);

  gpu::error::ContextLostReason context_lost_reason =
      gpu::error::kGpuChannelLost;
  if (shared_state_mapping_.IsValid()) {
    // The GPU process might have intentionally been crashed
    // (exit_on_context_lost), so try to find out the original reason.
    TryUpdateStateDontReportError();
    if (last_state_.error == gpu::error::kLostContext)
      context_lost_reason = last_state_.context_lost_reason;
  }
  OnGpuAsyncMessageError(context_lost_reason, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
                                         gpu::error::Error error) {
  base::AutoLock lock(last_state_lock_);
  OnGpuAsyncMessageError(reason, error);
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlErrorMessage(message.message.c_str(),
                                                  message.id);
}

void CommandBufferProxyImpl::OnGpuSwitched(
    gl::GpuPreference active_gpu_heuristic) {
  if (gpu_control_client_)
    gpu_control_client_->OnGpuSwitched(active_gpu_heuristic);
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  std::unique_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  std::unique_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSignalAck(uint32_t id,
                                         const CommandBuffer::State& state) {
  {
    base::AutoLock lock(last_state_lock_);
    SetStateFromMessageReply(state);
    if (last_state_.error != gpu::error::kNoError)
      return;
  }
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  if (it == signal_tasks_.end()) {
    LOG(ERROR) << "Gpu process sent invalid SignalAck.";
    base::AutoLock lock(last_state_lock_);
    OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
                           gpu::error::kLostContext);
    return;
  }
  base::OnceClosure callback = std::move(it->second);
  signal_tasks_.erase(it);
  std::move(callback).Run();
}

CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  base::AutoLock lock(last_state_lock_);
  TryUpdateState();
  return last_state_;
}

void CommandBufferProxyImpl::Flush(int32_t put_offset) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset",
               put_offset);

  OrderingBarrierHelper(put_offset);

  // Don't send messages once disconnected.
  if (!disconnected_)
    channel_->EnsureFlush(last_flush_id_);
}

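// Note: unlike Flush() above, OrderingBarrier() only records |put_offset| as
// an ordering point via OrderingBarrierHelper(); it does not call
// EnsureFlush(), so the commands are not necessarily submitted to the GPU
// process until a later Flush() or FlushPendingWork().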
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
               put_offset);

  OrderingBarrierHelper(put_offset);
}

void CommandBufferProxyImpl::OrderingBarrierHelper(int32_t put_offset) {
  DCHECK(has_buffer_);

  if (last_put_offset_ == put_offset)
    return;
  last_put_offset_ = put_offset;
  last_flush_id_ = channel_->OrderingBarrier(
      route_id_, put_offset, std::move(pending_sync_token_fences_));
}

void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
    const UpdateVSyncParametersCallback& callback) {
  CheckLock();
  update_vsync_parameters_completion_callback_ = callback;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange(
    int32_t start,
    int32_t end) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start,
               "end", end);
  // Error needs to be checked in case the state was updated on another thread.
  // We need to make sure that the reentrant context loss callback is called so
  // that the share group is also lost before we return any error up the stack.
  if (last_state_.error != gpu::error::kNoError) {
    if (gpu_control_client_)
      gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
    return last_state_;
  }
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end,
                                                         &state))) {
      SetStateFromMessageReply(state);
    }
  }
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    LOG(ERROR) << "GPU state invalid after WaitForTokenInRange.";
    OnGpuSyncReplyError();
  }
  return last_state_;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange(
    uint32_t set_get_buffer_count,
    int32_t start,
    int32_t end) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start",
               start, "end", end);
  // Error needs to be checked in case the state was updated on another thread.
  // We need to make sure that the reentrant context loss callback is called so
  // that the share group is also lost before we return any error up the stack.
  if (last_state_.error != gpu::error::kNoError) {
    if (gpu_control_client_)
      gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
    return last_state_;
  }
  TryUpdateState();
  if (((set_get_buffer_count != last_state_.set_get_buffer_count) ||
       !InRange(start, end, last_state_.get_offset)) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, set_get_buffer_count, start, end, &state)))
      SetStateFromMessageReply(state);
  }
  if (((set_get_buffer_count != last_state_.set_get_buffer_count) ||
       !InRange(start, end, last_state_.get_offset)) &&
      last_state_.error == gpu::error::kNoError) {
    LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange.";
    OnGpuSyncReplyError();
  }
  return last_state_;
}

void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
  has_buffer_ = (shm_id > 0);
}

scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    uint32_t size,
    int32_t* id,
    TransferBufferAllocationOption option) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  *id = -1;

  int32_t new_id = GetNextBufferId();

  base::UnsafeSharedMemoryRegion shared_memory_region;
  base::WritableSharedMemoryMapping shared_memory_mapping;
  std::tie(shared_memory_region, shared_memory_mapping) =
      AllocateAndMapSharedMemory(size);
  if (!shared_memory_mapping.IsValid()) {
    if (last_state_.error == gpu::error::kNoError &&
        option != TransferBufferAllocationOption::kReturnNullOnOOM)
      OnClientError(gpu::error::kOutOfBounds);
    return nullptr;
  }
  DCHECK_LE(shared_memory_mapping.size(), static_cast<size_t>(UINT32_MAX));

  if (last_state_.error == gpu::error::kNoError) {
    base::UnsafeSharedMemoryRegion region = shared_memory_region.Duplicate();
    if (!region.IsValid()) {
      if (last_state_.error == gpu::error::kNoError)
        OnClientError(gpu::error::kLostContext);
      return nullptr;
    }
    Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id,
                                                        std::move(region)));
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(gpu::MakeBufferFromSharedMemory(
      std::move(shared_memory_region), std::move(shared_memory_mapping)));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  last_flush_id_ = channel_->EnqueueDeferredMessage(
      GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) {
  CheckLock();
  gpu_control_client_ = client;
}

const gpu::Capabilities& CommandBufferProxyImpl::GetCapabilities() const {
  return capabilities_;
}

int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32_t new_id = channel_->ReserveImageId();

  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer);
  DCHECK(gpu_memory_buffer);

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and
  // the sending of the CreateImage IPC below.
  gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->CloneHandle();
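  // IO_SURFACE_BUFFER handles (macOS IOSurfaces) are assumed to need a
  // verified sync token so the buffer is not reused or destroyed before the
  // service side has imported it; see SetDestructionSyncToken() below.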
  bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;

  uint64_t image_fence_sync = 0;
  if (requires_sync_token)
    image_fence_sync = GenerateFenceSyncRelease();

  DCHECK(gpu::IsImageFromGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_))
      << gfx::BufferFormatToString(gpu_memory_buffer->GetFormat());
  DCHECK(gpu::IsImageSizeValidForGpuMemoryBufferFormat(
      gfx::Size(width, height), gpu_memory_buffer->GetFormat()))
      << gfx::BufferFormatToString(gpu_memory_buffer->GetFormat());

  GpuCommandBufferMsg_CreateImage_Params params;
  params.id = new_id;
  params.gpu_memory_buffer = std::move(handle);
  params.size = gfx::Size(width, height);
  params.format = gpu_memory_buffer->GetFormat();
  params.image_release_count = image_fence_sync;

  Send(new GpuCommandBufferMsg_CreateImage(route_id_, params));

  if (image_fence_sync) {
    gpu::SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(),
                              image_fence_sync);

    // Force a synchronous IPC to validate sync token.
    EnsureWorkVisible();
    sync_token.SetVerifyFlush();

    gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
                                                        sync_token);
  }

  return new_id;
}

void CommandBufferProxyImpl::DestroyImage(int32_t id) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
}

void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
  lock_ = lock;
}

void CommandBufferProxyImpl::EnsureWorkVisible() {
  // Don't send messages once disconnected.
  if (!disconnected_)
    channel_->VerifyFlush(UINT32_MAX);
}

gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
  return gpu::CommandBufferNamespace::GPU_IO;
}

gpu::CommandBufferId CommandBufferProxyImpl::GetCommandBufferID() const {
  return command_buffer_id_;
}

void CommandBufferProxyImpl::FlushPendingWork() {
  // Don't send messages once disconnected.
  if (!disconnected_)
    channel_->EnsureFlush(UINT32_MAX);
}

uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
  CheckLock();
  return next_fence_sync_release_++;
}

// This can be called from any thread without holding |lock_|. Use a thread-safe
// non-error throwing variant of TryUpdateState for this.
bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) {
  base::AutoLock lock(last_state_lock_);
  TryUpdateStateThreadSafe();
  return release <= last_state_.release_count;
}

void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
                                             base::OnceClosure callback) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32_t signal_id = next_signal_id_++;
  Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token,
                                               signal_id));
  signal_tasks_.insert(std::make_pair(signal_id, std::move(callback)));
}

void CommandBufferProxyImpl::WaitSyncToken(const gpu::SyncToken& sync_token) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  pending_sync_token_fences_.push_back(sync_token);
}

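// An unverified sync token is one whose release may not yet have been flushed
// to the GPU process. Waiting on it is assumed safe only when IPC ordering
// alone guarantees the release is processed first, which holds within a
// single channel; hence the same-channel check below.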
bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
    const gpu::SyncToken& sync_token) {
  // Can only wait on an unverified sync token if it is from the same channel.
  int sync_token_channel_id =
      ChannelIdFromCommandBufferId(sync_token.command_buffer_id());
  if (sync_token.namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
      sync_token_channel_id != channel_id_) {
    return false;
  }
  return true;
}

void CommandBufferProxyImpl::SignalQuery(uint32_t query,
                                         base::OnceClosure callback) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32_t signal_id = next_signal_id_++;
  Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id));
  signal_tasks_.insert(std::make_pair(signal_id, std::move(callback)));
}

void CommandBufferProxyImpl::CreateGpuFence(uint32_t gpu_fence_id,
                                            ClientGpuFence source) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError) {
    DLOG(ERROR) << "got error=" << last_state_.error;
    return;
  }

  // IPC accepts handles by const reference. However, on platforms where the
  // handle is backed by base::ScopedFD, const is cast away and the handle is
  // forcibly taken from you.
  gfx::GpuFence* gpu_fence = gfx::GpuFence::FromClientGpuFence(source);
  gfx::GpuFenceHandle handle = gpu_fence->GetGpuFenceHandle().Clone();
  Send(new GpuCommandBufferMsg_CreateGpuFenceFromHandle(route_id_, gpu_fence_id,
                                                        handle));
}

void CommandBufferProxyImpl::SetDisplayTransform(
    gfx::OverlayTransform transform) {
  NOTREACHED();
}

void CommandBufferProxyImpl::GetGpuFence(
    uint32_t gpu_fence_id,
    base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError) {
    DLOG(ERROR) << "got error=" << last_state_.error;
    return;
  }

  Send(new GpuCommandBufferMsg_GetGpuFenceHandle(route_id_, gpu_fence_id));
  get_gpu_fence_tasks_.emplace(gpu_fence_id, std::move(callback));
}

void CommandBufferProxyImpl::OnGetGpuFenceHandleComplete(
    uint32_t gpu_fence_id,
    gfx::GpuFenceHandle handle) {
  // Always consume the provided handle to avoid leaks on error.
  auto gpu_fence = std::make_unique<gfx::GpuFence>(std::move(handle));

  GetGpuFenceTaskMap::iterator it = get_gpu_fence_tasks_.find(gpu_fence_id);
  if (it == get_gpu_fence_tasks_.end()) {
    DLOG(ERROR) << "GPU process sent invalid GetGpuFenceHandle response.";
    base::AutoLock lock(last_state_lock_);
    OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
                           gpu::error::kLostContext);
    return;
  }
  auto callback = std::move(it->second);
  get_gpu_fence_tasks_.erase(it);
  std::move(callback).Run(std::move(gpu_fence));
}

void CommandBufferProxyImpl::OnReturnData(const std::vector<uint8_t>& data) {
  if (gpu_control_client_) {
    gpu_control_client_->OnGpuControlReturnData(data);
  }
}

void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  // TakeFrontBuffer should be a deferred message so that it's sequenced
  // correctly with respect to preceding ReturnFrontBuffer messages.
  last_flush_id_ = channel_->EnqueueDeferredMessage(
      GpuCommandBufferMsg_TakeFrontBuffer(route_id_, mailbox));
}

void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
                                               const gpu::SyncToken& sync_token,
                                               bool is_lost) {
  CheckLock();
  base::AutoLock lock(last_state_lock_);
  if (last_state_.error != gpu::error::kNoError)
    return;

  last_flush_id_ = channel_->EnqueueDeferredMessage(
      GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost),
      {sync_token});
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  DCHECK(channel_);
  DCHECK_EQ(gpu::error::kNoError, last_state_.error);

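  // Drop |last_state_lock_| while the potentially blocking (synchronous) send
  // is in flight, so other threads can update |last_state_|; the error is
  // re-checked after the lock is reacquired below.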
  last_state_lock_.Release();

  // Call is_sync() before sending message.
  bool is_sync = msg->is_sync();
  bool result = channel_->Send(msg);
  // Send() should always return true for async messages.
  DCHECK(is_sync || result);

  last_state_lock_.Acquire();

  if (last_state_.error != gpu::error::kNoError) {
    // Error needs to be checked in case the state was updated on another thread
    // while we were waiting on Send. We need to make sure that the reentrant
    // context loss callback is called so that the share group is also lost
    // before we return any error up the stack.
    if (gpu_control_client_)
      gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
    return false;
  }

  if (!result) {
    // Flag the command buffer as lost. Defer deleting the channel until
    // OnChannelError is called after returning to the message loop in case it
    // is referenced elsewhere.
    DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
    OnClientError(gpu::error::kLostContext);
    return false;
  }

  return true;
}

std::pair<base::UnsafeSharedMemoryRegion, base::WritableSharedMemoryMapping>
CommandBufferProxyImpl::AllocateAndMapSharedMemory(size_t size) {
  base::UnsafeSharedMemoryRegion region =
      base::UnsafeSharedMemoryRegion::Create(size);
  if (!region.IsValid()) {
    DLOG(ERROR) << "AllocateAndMapSharedMemory: Allocation failed";
    return {};
  }

  base::WritableSharedMemoryMapping mapping = region.Map();
  if (!mapping.IsValid()) {
    DLOG(ERROR) << "AllocateAndMapSharedMemory: Map failed";
    return {};
  }

  return {std::move(region), std::move(mapping)};
}

void CommandBufferProxyImpl::SetStateFromMessageReply(
    const gpu::CommandBuffer::State& state) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
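  // For example (hypothetical values): with last generation 0xFFFFFFFFu, a
  // reply with generation 0x00000001u gives 0x00000001u - 0xFFFFFFFFu == 2,
  // which is < 0x80000000U, so the post-wraparound state is accepted; a stale
  // reply with generation 5u against a last generation of 10u gives
  // 5u - 10u == 0xFFFFFFFBu >= 0x80000000U and is ignored.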
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
  if (last_state_.error != gpu::error::kNoError)
    OnGpuStateError();
}

void CommandBufferProxyImpl::TryUpdateState() {
  CheckLock();
  if (last_state_.error == gpu::error::kNoError) {
    shared_state()->Read(&last_state_);
    if (last_state_.error != gpu::error::kNoError)
      OnGpuStateError();
  }
}

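// Unlike TryUpdateState(), this may be called on a thread that does not hold
// |lock_|, so instead of reporting a newly observed error synchronously it
// posts LockAndDisconnectChannel() to |callback_thread_| (see the comment
// above IsFenceSyncReleased()).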
void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
  if (last_state_.error == gpu::error::kNoError) {
    shared_state()->Read(&last_state_);
    if (last_state_.error != gpu::error::kNoError) {
      callback_thread_->PostTask(
          FROM_HERE,
          base::BindOnce(&CommandBufferProxyImpl::LockAndDisconnectChannel,
                         weak_ptr_factory_.GetWeakPtr()));
    }
  }
}

void CommandBufferProxyImpl::TryUpdateStateDontReportError() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_mapping_.memory());
}

void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const SwapBuffersCompleteParams& params) {
  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlSwapBuffersCompleted(params);
}

void CommandBufferProxyImpl::OnBufferPresented(
    uint64_t swap_id,
    const gfx::PresentationFeedback& feedback) {
  if (gpu_control_client_)
    gpu_control_client_->OnSwapBufferPresented(swap_id, feedback);
  if (update_vsync_parameters_completion_callback_ &&
      ShouldUpdateVsyncParams(feedback)) {
    update_vsync_parameters_completion_callback_.Run(feedback.timestamp,
                                                     feedback.interval);
  }
}

void CommandBufferProxyImpl::OnGpuSyncReplyError() {
  CheckLock();
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
  // This method may be inside a callstack from the GpuControlClient (we got a
  // bad reply to something we are sending to the GPU process). So avoid
  // re-entering the GpuControlClient here.
  DisconnectChannelInFreshCallStack();
}

void CommandBufferProxyImpl::OnGpuAsyncMessageError(
    gpu::error::ContextLostReason reason,
    gpu::error::Error error) {
  CheckLock();
  last_state_.error = error;
  last_state_.context_lost_reason = reason;
  // This method only occurs when receiving IPC messages, so we know it's not in
  // a callstack from the GpuControlClient. Unlock the state lock to prevent
  // a deadlock when calling the context loss callback.
  base::AutoUnlock unlock(last_state_lock_);
  DisconnectChannel();
}

void CommandBufferProxyImpl::OnGpuStateError() {
  CheckLock();
  DCHECK_NE(gpu::error::kNoError, last_state_.error);
  // This method may be inside a callstack from the GpuControlClient (we
  // encountered an error while trying to perform some action). So avoid
  // re-entering the GpuControlClient here.
  DisconnectChannelInFreshCallStack();
}

void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
  CheckLock();
  last_state_.error = error;
  last_state_.context_lost_reason = gpu::error::kUnknown;
  // This method may be inside a callstack from the GpuControlClient (we
  // encountered an error while trying to perform some action). So avoid
  // re-entering the GpuControlClient here.
  DisconnectChannelInFreshCallStack();
}

void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
  CheckLock();
  // Inform the GpuControlClient of the lost state immediately, though this may
  // be a re-entrant call to the client so we use the MaybeReentrant variant.
  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
  // Create a fresh call stack to keep the |channel_| alive while we unwind
  // the stack in case things will use it, and give the GpuControlClient a
  // chance to act fully on the lost context.
  callback_thread_->PostTask(
      FROM_HERE,
      base::BindOnce(&CommandBufferProxyImpl::LockAndDisconnectChannel,
                     weak_ptr_factory_.GetWeakPtr()));
}

void CommandBufferProxyImpl::LockAndDisconnectChannel() {
  base::AutoLockMaybe lock(lock_);
  DisconnectChannel();
}

void CommandBufferProxyImpl::DisconnectChannel() {
  CheckLock();
  // Prevent any further messages from being sent, and ensure we only call
  // the client for lost context a single time.
  if (!channel_ || disconnected_)
    return;
  disconnected_ = true;
  channel_->VerifyFlush(UINT32_MAX);
  channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_));
  channel_->RemoveRoute(route_id_);
  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlLostContext();
}

}  // namespace gpu