1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/ipc/client/gpu_channel_host.h"
6
7 #include <algorithm>
8 #include <utility>
9
10 #include "base/atomic_sequence_num.h"
11 #include "base/bind.h"
12 #include "base/memory/ptr_util.h"
13 #include "base/metrics/histogram_macros.h"
14 #include "base/single_thread_task_runner.h"
15 #include "base/threading/thread_restrictions.h"
16 #include "base/threading/thread_task_runner_handle.h"
17 #include "build/build_config.h"
18 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
19 #include "gpu/ipc/client/client_shared_image_interface.h"
20 #include "gpu/ipc/common/command_buffer_id.h"
21 #include "gpu/ipc/common/gpu_messages.h"
22 #include "gpu/ipc/common/gpu_param_traits_macros.h"
23 #include "gpu/ipc/common/gpu_watchdog_timeout.h"
24 #include "ipc/ipc_channel_mojo.h"
25 #include "ipc/ipc_sync_message.h"
26 #include "ipc/trace_ipc_message.h"
27 #include "mojo/public/cpp/bindings/lib/message_quota_checker.h"
28 #include "url/gurl.h"
29
30 using base::AutoLock;
31
32 namespace gpu {
33
// Constructs the client-side endpoint of a GPU channel. |handle| is the raw
// message pipe to the GPU process. Runs on the IO thread: |io_thread_| is
// captured from the current thread, and the Listener constructed below
// DCHECKs that it is created on that same runner.
GpuChannelHost::GpuChannelHost(int channel_id,
                               const gpu::GPUInfo& gpu_info,
                               const gpu::GpuFeatureInfo& gpu_feature_info,
                               mojo::ScopedMessagePipeHandle handle)
    : io_thread_(base::ThreadTaskRunnerHandle::Get()),
      channel_id_(channel_id),
      gpu_info_(gpu_info),
      gpu_feature_info_(gpu_feature_info),
      // The Listener must be destroyed on the IO thread, hence the
      // OnTaskRunnerDeleter bound to |io_thread_|.
      listener_(new Listener(std::move(handle), io_thread_),
                base::OnTaskRunnerDeleter(io_thread_)),
      shared_image_interface_(
          this,
          static_cast<int32_t>(
              GpuChannelReservedRoutes::kSharedImageInterface)),
      image_decode_accelerator_proxy_(
          this,
          static_cast<int32_t>(
              GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
  // Burn the first value so ReserveImageId() never hands out 0.
  next_image_id_.GetNext();
  // Skip past all internally reserved route ids (see
  // GpuChannelReservedRoutes) so GenerateRouteID() starts after them.
  for (int32_t i = 0;
       i <= static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue); ++i)
    next_route_id_.GetNext();

#if defined(OS_MACOSX)
  // Propagate the texture target chosen by the GPU process into
  // process-wide state on macOS.
  gpu::SetMacOSSpecificTextureTarget(gpu_info.macos_specific_texture_target);
#endif  // defined(OS_MACOSX)
}
61
// Sends |msg| to the GPU process, taking ownership of it. Asynchronous
// messages are posted to the IO thread and this returns true immediately;
// synchronous messages block the calling thread until the reply arrives or
// the channel errors out. Must not be called on the IO thread itself.
bool GpuChannelHost::Send(IPC::Message* msg) {
  TRACE_IPC_MESSAGE_SEND("ipc", "GpuChannelHost::Send", msg);

  auto message = base::WrapUnique(msg);

  DCHECK(!io_thread_->BelongsToCurrentThread());

  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  if (!message->is_sync()) {
    io_thread_->PostTask(FROM_HERE,
                         base::BindOnce(&Listener::SendMessage,
                                        base::Unretained(listener_.get()),
                                        std::move(message), nullptr));
    return true;
  }

  // Synchronous path: hand the message to the IO thread together with a
  // PendingSyncMsg whose |done_event| the Listener signals when the reply
  // arrives (OnMessageReceived) or the channel dies (OnChannelError).
  base::WaitableEvent done_event(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  auto deserializer = base::WrapUnique(
      static_cast<IPC::SyncMessage*>(message.get())->GetReplyDeserializer());

  // |pending_sync| lives on this stack frame; the Listener holds a raw
  // pointer to it only until |done_event| is signaled below.
  IPC::PendingSyncMsg pending_sync(IPC::SyncMessage::GetMessageId(*message),
                                   deserializer.get(), &done_event);
  io_thread_->PostTask(
      FROM_HERE,
      base::BindOnce(&Listener::SendMessage, base::Unretained(listener_.get()),
                     std::move(message), &pending_sync));

  // http://crbug.com/125264
  base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;

  // TODO(magchen): crbug.com/949839. Remove this histogram and do only one
  // done_event->Wait() after the GPU watchdog V2 is fully launched.
  base::TimeTicks start_time = base::TimeTicks::Now();

  // The wait for event is split into two phases so we can still record the
  // case in which the GPU hangs but not killed. Also all data should be
  // recorded in the range of max_wait_sec seconds for easier comparison.
  bool signaled =
      pending_sync.done_event->TimedWait(kGpuChannelHostMaxWaitTime);

  base::TimeDelta wait_duration = base::TimeTicks::Now() - start_time;

  // Histogram of wait-for-sync time, used for monitoring the GPU watchdog.
  UMA_HISTOGRAM_CUSTOM_TIMES("GPU.GPUChannelHostWaitTime2", wait_duration,
                             base::TimeDelta::FromSeconds(1),
                             kGpuChannelHostMaxWaitTime, 50);

  // Continue waiting for the event if not signaled
  if (!signaled)
    pending_sync.done_event->Wait();

  return pending_sync.send_result;
}
120
// Records an ordering barrier for |route_id|, coalescing with an existing
// pending barrier on the same route: the latest |put_offset| wins and the
// sync token fences accumulate. A barrier pending for a *different* route is
// enqueued into the deferred message queue first, so cross-route ordering is
// preserved. Returns the deferred message id assigned to this barrier.
uint32_t GpuChannelHost::OrderingBarrier(
    int32_t route_id,
    int32_t put_offset,
    std::vector<SyncToken> sync_token_fences) {
  AutoLock lock(context_lock_);

  // A barrier pending for another route cannot be coalesced with this one.
  if (pending_ordering_barrier_ &&
      pending_ordering_barrier_->route_id != route_id)
    EnqueuePendingOrderingBarrier();
  if (!pending_ordering_barrier_)
    pending_ordering_barrier_.emplace();

  // Coalesced barriers keep only the newest id/put_offset...
  pending_ordering_barrier_->deferred_message_id = next_deferred_message_id_++;
  pending_ordering_barrier_->route_id = route_id;
  pending_ordering_barrier_->put_offset = put_offset;
  // ...but accumulate every fence (moved, not copied).
  pending_ordering_barrier_->sync_token_fences.insert(
      pending_ordering_barrier_->sync_token_fences.end(),
      std::make_move_iterator(sync_token_fences.begin()),
      std::make_move_iterator(sync_token_fences.end()));
  return pending_ordering_barrier_->deferred_message_id;
}
142
EnqueueDeferredMessage(const IPC::Message & message,std::vector<SyncToken> sync_token_fences)143 uint32_t GpuChannelHost::EnqueueDeferredMessage(
144 const IPC::Message& message,
145 std::vector<SyncToken> sync_token_fences) {
146 AutoLock lock(context_lock_);
147
148 EnqueuePendingOrderingBarrier();
149 enqueued_deferred_message_id_ = next_deferred_message_id_++;
150 GpuDeferredMessage deferred_message;
151 deferred_message.message = message;
152 deferred_message.sync_token_fences = std::move(sync_token_fences);
153 deferred_messages_.push_back(std::move(deferred_message));
154 return enqueued_deferred_message_id_;
155 }
156
EnsureFlush(uint32_t deferred_message_id)157 void GpuChannelHost::EnsureFlush(uint32_t deferred_message_id) {
158 AutoLock lock(context_lock_);
159 InternalFlush(deferred_message_id);
160 }
161
// Like EnsureFlush(), but additionally ensures the flush has been verified:
// if |deferred_message_id| is beyond the last verified id, a Nop message is
// sent and everything flushed so far is then considered verified.
void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
  AutoLock lock(context_lock_);

  InternalFlush(deferred_message_id);

  if (deferred_message_id > verified_deferred_message_id_) {
    // NOTE(review): this relies on GpuChannelMsg_Nop acting as a round-trip
    // barrier (i.e. a synchronous message) — confirm in gpu_messages.h.
    Send(new GpuChannelMsg_Nop());
    verified_deferred_message_id_ = flushed_deferred_message_id_;
  }
}
172
// Converts the pending ordering barrier (if any) into an AsyncFlush deferred
// message appended to |deferred_messages_|, then clears the pending state.
// Caller must hold |context_lock_|.
void GpuChannelHost::EnqueuePendingOrderingBarrier() {
  context_lock_.AssertAcquired();
  if (!pending_ordering_barrier_)
    return;
  // Ids are handed out monotonically, so the pending barrier's id must be
  // newer than anything already enqueued.
  DCHECK_LT(enqueued_deferred_message_id_,
            pending_ordering_barrier_->deferred_message_id);
  enqueued_deferred_message_id_ =
      pending_ordering_barrier_->deferred_message_id;
  GpuDeferredMessage deferred_message;
  deferred_message.message = GpuCommandBufferMsg_AsyncFlush(
      pending_ordering_barrier_->route_id,
      pending_ordering_barrier_->put_offset,
      pending_ordering_barrier_->deferred_message_id,
      pending_ordering_barrier_->sync_token_fences);
  // The fences were serialized into the message above; move the vector out
  // afterwards for the wrapper's own fence list.
  deferred_message.sync_token_fences =
      std::move(pending_ordering_barrier_->sync_token_fences);
  deferred_messages_.push_back(std::move(deferred_message));
  pending_ordering_barrier_.reset();
}
192
// Sends the queued deferred messages to the GPU process if
// |deferred_message_id| has not been flushed yet. Caller must hold
// |context_lock_|.
void GpuChannelHost::InternalFlush(uint32_t deferred_message_id) {
  context_lock_.AssertAcquired();

  EnqueuePendingOrderingBarrier();
  if (!deferred_messages_.empty() &&
      deferred_message_id > flushed_deferred_message_id_) {
    // Every id allocated so far must already be represented in the queue.
    DCHECK_EQ(enqueued_deferred_message_id_, next_deferred_message_id_ - 1);

    Send(
        new GpuChannelMsg_FlushDeferredMessages(std::move(deferred_messages_)));

    // |deferred_messages_| is moved-from; clear() restores a known-empty
    // state for reuse.
    deferred_messages_.clear();
    flushed_deferred_message_id_ = next_deferred_message_id_ - 1;
  }
}
208
DestroyChannel()209 void GpuChannelHost::DestroyChannel() {
210 io_thread_->PostTask(
211 FROM_HERE,
212 base::BindOnce(&Listener::Close, base::Unretained(listener_.get())));
213 }
214
AddRoute(int route_id,base::WeakPtr<IPC::Listener> listener)215 void GpuChannelHost::AddRoute(int route_id,
216 base::WeakPtr<IPC::Listener> listener) {
217 AddRouteWithTaskRunner(route_id, listener,
218 base::ThreadTaskRunnerHandle::Get());
219 }
220
AddRouteWithTaskRunner(int route_id,base::WeakPtr<IPC::Listener> listener,scoped_refptr<base::SingleThreadTaskRunner> task_runner)221 void GpuChannelHost::AddRouteWithTaskRunner(
222 int route_id,
223 base::WeakPtr<IPC::Listener> listener,
224 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
225 io_thread_->PostTask(
226 FROM_HERE, base::BindOnce(&GpuChannelHost::Listener::AddRoute,
227 base::Unretained(listener_.get()), route_id,
228 listener, task_runner));
229 }
230
RemoveRoute(int route_id)231 void GpuChannelHost::RemoveRoute(int route_id) {
232 io_thread_->PostTask(
233 FROM_HERE, base::BindOnce(&GpuChannelHost::Listener::RemoveRoute,
234 base::Unretained(listener_.get()), route_id));
235 }
236
ReserveImageId()237 int32_t GpuChannelHost::ReserveImageId() {
238 return next_image_id_.GetNext();
239 }
240
GenerateRouteID()241 int32_t GpuChannelHost::GenerateRouteID() {
242 return next_route_id_.GetNext();
243 }
244
CrashGpuProcessForTesting()245 void GpuChannelHost::CrashGpuProcessForTesting() {
246 Send(new GpuChannelMsg_CrashForTesting());
247 }
248
249 std::unique_ptr<ClientSharedImageInterface>
CreateClientSharedImageInterface()250 GpuChannelHost::CreateClientSharedImageInterface() {
251 return std::make_unique<ClientSharedImageInterface>(&shared_image_interface_);
252 }
253
254 GpuChannelHost::~GpuChannelHost() = default;
255
// RouteInfo is plain value data (listener weak pointer + task runner); all
// special members are the compiler-generated defaults, spelled out-of-line.
GpuChannelHost::Listener::RouteInfo::RouteInfo() = default;

GpuChannelHost::Listener::RouteInfo::RouteInfo(const RouteInfo& other) =
    default;
GpuChannelHost::Listener::RouteInfo::RouteInfo(RouteInfo&& other) = default;
GpuChannelHost::Listener::RouteInfo::~RouteInfo() = default;

GpuChannelHost::Listener::RouteInfo& GpuChannelHost::Listener::RouteInfo::
operator=(const RouteInfo& other) = default;

GpuChannelHost::Listener::RouteInfo& GpuChannelHost::Listener::RouteInfo::
operator=(RouteInfo&& other) = default;
268
// OrderingBarrierInfo is move-only state for a barrier that has not yet been
// enqueued as a deferred message; defaults are sufficient for all members.
GpuChannelHost::OrderingBarrierInfo::OrderingBarrierInfo() = default;

GpuChannelHost::OrderingBarrierInfo::~OrderingBarrierInfo() = default;

GpuChannelHost::OrderingBarrierInfo::OrderingBarrierInfo(
    OrderingBarrierInfo&&) = default;

GpuChannelHost::OrderingBarrierInfo& GpuChannelHost::OrderingBarrierInfo::
operator=(OrderingBarrierInfo&&) = default;
278
// Builds the IO-thread side of the channel and connects it immediately.
// Must be constructed on |io_task_runner| (DCHECKed below), since the
// ChannelMojo is bound to the current thread and Connect() runs here.
GpuChannelHost::Listener::Listener(
    mojo::ScopedMessagePipeHandle handle,
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner)
    : channel_(IPC::ChannelMojo::Create(
          std::move(handle),
          IPC::Channel::MODE_CLIENT,
          this,
          io_task_runner,
          base::ThreadTaskRunnerHandle::Get(),
          mojo::internal::MessageQuotaChecker::MaybeCreate())) {
  DCHECK(channel_);
  DCHECK(io_task_runner->BelongsToCurrentThread());
  bool result = channel_->Connect();
  DCHECK(result);
}
294
GpuChannelHost::Listener::~Listener() {
  // By destruction time every pending sync send must have been resolved —
  // either replied to in OnMessageReceived() or failed via OnChannelError().
  DCHECK(pending_syncs_.empty());
}
298
// Posted from GpuChannelHost::DestroyChannel(). Reuses the error path so
// pending syncs and routes are torn down through a single code path.
void GpuChannelHost::Listener::Close() {
  OnChannelError();
}
302
AddRoute(int32_t route_id,base::WeakPtr<IPC::Listener> listener,scoped_refptr<base::SingleThreadTaskRunner> task_runner)303 void GpuChannelHost::Listener::AddRoute(
304 int32_t route_id,
305 base::WeakPtr<IPC::Listener> listener,
306 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
307 DCHECK(routes_.find(route_id) == routes_.end());
308 DCHECK(task_runner);
309 RouteInfo info;
310 info.listener = listener;
311 info.task_runner = std::move(task_runner);
312 routes_[route_id] = info;
313
314 if (lost_) {
315 info.task_runner->PostTask(
316 FROM_HERE,
317 base::BindOnce(&IPC::Listener::OnChannelError, info.listener));
318 }
319 }
320
RemoveRoute(int32_t route_id)321 void GpuChannelHost::Listener::RemoveRoute(int32_t route_id) {
322 routes_.erase(route_id);
323 }
324
// IO-thread handler for every incoming message. Replies to pending sync
// sends are resolved here (deserialized and their waiters woken); all other
// messages are forwarded to the listener registered for their routing id, on
// that listener's task runner. Returns false for unrecognized messages.
bool GpuChannelHost::Listener::OnMessageReceived(const IPC::Message& message) {
  if (message.is_reply()) {
    int id = IPC::SyncMessage::GetMessageId(message);
    auto it = pending_syncs_.find(id);
    if (it == pending_syncs_.end())
      return false;
    auto* pending_sync = it->second;
    pending_syncs_.erase(it);
    // On a reply error, send_result is left untouched.
    if (!message.is_reply_error()) {
      pending_sync->send_result =
          pending_sync->deserializer->SerializeOutputParameters(message);
    }
    // Wake the thread blocked in GpuChannelHost::Send().
    pending_sync->done_event->Signal();
    return true;
  }

  auto it = routes_.find(message.routing_id());
  if (it == routes_.end())
    return false;

  // Forward on the registered thread; the weak |info.listener| makes this a
  // no-op if the listener is gone by then.
  const RouteInfo& info = it->second;
  info.task_runner->PostTask(
      FROM_HERE,
      base::BindOnce(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                     info.listener, message));
  return true;
}
352
// Handles channel teardown (transport error or explicit Close()). Runs on
// the IO thread: marks the channel lost, wakes every thread blocked on a
// sync send, and notifies each registered route's listener on its own task
// runner.
void GpuChannelHost::Listener::OnChannelError() {
  channel_ = nullptr;
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to re-use
  // this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Unblock all callers waiting in GpuChannelHost::Send(); their replies
  // will never arrive.
  for (auto& kv : pending_syncs_) {
    IPC::PendingSyncMsg* pending_sync = kv.second;
    pending_sync->done_event->Signal();
  }
  pending_syncs_.clear();

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (const auto& kv : routes_) {
    const RouteInfo& info = kv.second;
    info.task_runner->PostTask(
        FROM_HERE,
        base::BindOnce(&IPC::Listener::OnChannelError, info.listener));
  }

  routes_.clear();
}
380
// IO-thread send. For sync messages, |pending_sync| registers the waiting
// caller so the reply can be matched up in OnMessageReceived(); if the
// channel is already lost, the waiter is signaled immediately (without a
// successful result) instead.
void GpuChannelHost::Listener::SendMessage(std::unique_ptr<IPC::Message> msg,
                                           IPC::PendingSyncMsg* pending_sync) {
  // Note: lost_ is only written on this thread, so it is safe to read here
  // without lock.
  if (pending_sync) {
    DCHECK(msg->is_sync());
    if (lost_) {
      // Unblock the sender right away; no reply is coming.
      pending_sync->done_event->Signal();
      return;
    }
    pending_syncs_.emplace(pending_sync->id, pending_sync);
  } else {
    if (lost_)
      return;
    DCHECK(!msg->is_sync());
  }
  DCHECK(!lost_);
  // The channel takes ownership of the raw message pointer.
  channel_->Send(msg.release());
}
400
IsLost() const401 bool GpuChannelHost::Listener::IsLost() const {
402 AutoLock lock(lock_);
403 return lost_;
404 }
405
406 } // namespace gpu
407