1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/viz/service/gl/gpu_service_impl.h"
6
7 #include <memory>
8 #include <string>
9 #include <utility>
10
11 #include "base/bind.h"
12 #include "base/command_line.h"
13 #include "base/feature_list.h"
14 #include "base/no_destructor.h"
15 #include "base/task/post_task.h"
16 #include "base/task/thread_pool.h"
17 #include "base/task_runner_util.h"
18 #include "base/threading/thread_task_runner_handle.h"
19 #include "build/build_config.h"
20 #include "components/viz/common/features.h"
21 #include "components/viz/common/gpu/metal_context_provider.h"
22 #include "components/viz/common/gpu/vulkan_context_provider.h"
23 #include "components/viz/common/gpu/vulkan_in_process_context_provider.h"
24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
25 #include "gpu/command_buffer/service/gpu_switches.h"
26 #include "gpu/command_buffer/service/scheduler.h"
27 #include "gpu/command_buffer/service/shared_context_state.h"
28 #include "gpu/command_buffer/service/sync_point_manager.h"
29 #include "gpu/config/dx_diag_node.h"
30 #include "gpu/config/gpu_finch_features.h"
31 #include "gpu/config/gpu_info_collector.h"
32 #include "gpu/config/gpu_switches.h"
33 #include "gpu/config/gpu_util.h"
34 #include "gpu/config/skia_limits.h"
35 #include "gpu/ipc/common/gpu_client_ids.h"
36 #include "gpu/ipc/common/gpu_memory_buffer_support.h"
37 #include "gpu/ipc/common/gpu_peak_memory.h"
38 #include "gpu/ipc/common/memory_stats.h"
39 #include "gpu/ipc/in_process_command_buffer.h"
40 #include "gpu/ipc/service/gpu_channel.h"
41 #include "gpu/ipc/service/gpu_channel_manager.h"
42 #include "gpu/ipc/service/gpu_memory_buffer_factory.h"
43 #include "gpu/ipc/service/gpu_watchdog_thread.h"
44 #include "gpu/ipc/service/image_decode_accelerator_worker.h"
45 #include "gpu/vulkan/buildflags.h"
46 #include "ipc/ipc_channel_handle.h"
47 #include "ipc/ipc_sync_channel.h"
48 #include "ipc/ipc_sync_message_filter.h"
49 #include "media/gpu/buildflags.h"
50 #include "media/gpu/gpu_video_accelerator_util.h"
51 #include "media/gpu/gpu_video_encode_accelerator_factory.h"
52 #include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
53 #include "media/gpu/ipc/service/media_gpu_channel_manager.h"
54 #include "media/mojo/services/mojo_video_encode_accelerator_provider.h"
55 #include "mojo/public/cpp/bindings/self_owned_receiver.h"
56 #include "skia/buildflags.h"
57 #include "third_party/skia/include/gpu/GrContext.h"
58 #include "third_party/skia/include/gpu/gl/GrGLAssembleInterface.h"
59 #include "third_party/skia/include/gpu/gl/GrGLInterface.h"
60 #include "ui/gl/gl_context.h"
61 #include "ui/gl/gl_implementation.h"
62 #include "ui/gl/gl_switches.h"
63 #include "ui/gl/gl_utils.h"
64 #include "ui/gl/gpu_switching_manager.h"
65 #include "ui/gl/init/create_gr_gl_interface.h"
66 #include "ui/gl/init/gl_factory.h"
67 #include "url/gurl.h"
68
69 #if BUILDFLAG(USE_VAAPI)
70 #include "media/gpu/vaapi/vaapi_image_decode_accelerator_worker.h"
71 #endif // BUILDFLAG(USE_VAAPI)
72
73 #if defined(OS_ANDROID)
74 #include "components/viz/service/gl/throw_uncaught_exception.h"
75 #endif
76
77 #if defined(OS_CHROMEOS)
78 #include "components/arc/video_accelerator/gpu_arc_video_decode_accelerator.h"
79 #include "components/arc/video_accelerator/gpu_arc_video_encode_accelerator.h"
80 #include "components/arc/video_accelerator/gpu_arc_video_protected_buffer_allocator.h"
81 #include "components/arc/video_accelerator/protected_buffer_manager.h"
82 #include "components/arc/video_accelerator/protected_buffer_manager_proxy.h"
83 #include "components/chromeos_camera/gpu_mjpeg_decode_accelerator_factory.h"
84 #include "components/chromeos_camera/mojo_jpeg_encode_accelerator_service.h"
85 #include "components/chromeos_camera/mojo_mjpeg_decode_accelerator_service.h"
86 #endif // defined(OS_CHROMEOS)
87
88 #if defined(OS_WIN)
89 #include "ui/gl/direct_composition_surface_win.h"
90 #endif
91
92 #if defined(OS_MACOSX)
93 #include "ui/base/cocoa/quartz_util.h"
94 #endif
95
96 #if BUILDFLAG(SKIA_USE_DAWN)
97 #include "components/viz/common/gpu/dawn_context_provider.h"
98 #endif
99
100 namespace viz {
101
102 namespace {
103
// Signature used to forward a LOG() entry (severity plus the pre-split
// header/body text) to the GpuHost once a connection to it exists.
using LogCallback = base::RepeatingCallback<
    void(int severity, const std::string& header, const std::string& message)>;
106
// A LOG() entry captured before the GpuHost connection exists. |header| is
// the formatted log prefix and |message| the body, as split by the log
// handlers below.
struct LogMessage {
  // Takes |header| and |message| by value so callers can hand over
  // temporaries (e.g. std::string::substr() results) and the std::move()s
  // below actually transfer the strings. The previous const-reference
  // parameters made those moves silent copies.
  LogMessage(int severity, std::string header, std::string message)
      : severity(severity),
        header(std::move(header)),
        message(std::move(message)) {}
  const int severity;
  const std::string header;
  const std::string message;
};
118
// Forward declare log handlers so they can be used within LogMessageManager.
// Both match the logging::LogMessageHandlerFunction signature;
// |message_start| is the offset where the formatted log prefix ("header")
// ends and the message body begins.
bool PreInitializeLogHandler(int severity,
                             const char* file,
                             int line,
                             size_t message_start,
                             const std::string& message);
bool PostInitializeLogHandler(int severity,
                              const char* file,
                              int line,
                              size_t message_start,
                              const std::string& message);
130
131 // Class which manages LOG() message forwarding before and after GpuServiceImpl
132 // InitializeWithHost(). Prior to initialize, log messages are deferred and kept
133 // within the class. During initialize, InstallPostInitializeLogHandler() will
134 // be called to flush deferred messages and route new ones directly to GpuHost.
135 class LogMessageManager {
136 public:
137 LogMessageManager() = default;
138 ~LogMessageManager() = delete;
139
140 // Queues a deferred LOG() message into |deferred_messages_| unless
141 // |log_callback_| has been set -- in which case RouteMessage() is called.
AddDeferredMessage(int severity,const std::string & header,const std::string & message)142 void AddDeferredMessage(int severity,
143 const std::string& header,
144 const std::string& message) {
145 base::AutoLock lock(message_lock_);
146 // During InstallPostInitializeLogHandler() there's a brief window where a
147 // call into this function may be waiting on |message_lock_|, so we need to
148 // check if |log_callback_| was set once we get the lock.
149 if (log_callback_) {
150 RouteMessage(severity, std::move(header), std::move(message));
151 return;
152 }
153
154 // Otherwise just queue the message for InstallPostInitializeLogHandler() to
155 // forward later.
156 deferred_messages_.emplace_back(severity, std::move(header),
157 std::move(message));
158 }
159
160 // Used after InstallPostInitializeLogHandler() to route messages directly to
161 // |log_callback_|; avoids the need for a global lock.
RouteMessage(int severity,const std::string & header,const std::string & message)162 void RouteMessage(int severity,
163 const std::string& header,
164 const std::string& message) {
165 log_callback_.Run(severity, std::move(header), std::move(message));
166 }
167
168 // If InstallPostInitializeLogHandler() will never be called, this method is
169 // called prior to process exit to ensure logs are forwarded.
FlushMessages(mojom::GpuHost * gpu_host)170 void FlushMessages(mojom::GpuHost* gpu_host) {
171 base::AutoLock lock(message_lock_);
172 for (auto& log : deferred_messages_) {
173 gpu_host->RecordLogMessage(log.severity, std::move(log.header),
174 std::move(log.message));
175 }
176 deferred_messages_.clear();
177 }
178
179 // Used prior to InitializeWithHost() during GpuMain startup to ensure logs
180 // aren't lost before initialize.
InstallPreInitializeLogHandler()181 void InstallPreInitializeLogHandler() {
182 DCHECK(!log_callback_);
183 logging::SetLogMessageHandler(PreInitializeLogHandler);
184 }
185
186 // Called by InitializeWithHost() to take over logging from the
187 // PostInitializeLogHandler(). Flushes all deferred messages.
InstallPostInitializeLogHandler(LogCallback log_callback)188 void InstallPostInitializeLogHandler(LogCallback log_callback) {
189 base::AutoLock lock(message_lock_);
190 DCHECK(!log_callback_);
191 log_callback_ = std::move(log_callback);
192 for (auto& log : deferred_messages_)
193 RouteMessage(log.severity, std::move(log.header), std::move(log.message));
194 deferred_messages_.clear();
195 logging::SetLogMessageHandler(PostInitializeLogHandler);
196 }
197
198 // Called when it's no longer safe to invoke |log_callback_|.
ShutdownLogging()199 void ShutdownLogging() { logging::SetLogMessageHandler(nullptr); }
200
201 private:
202 base::Lock message_lock_;
203 std::vector<LogMessage> deferred_messages_ GUARDED_BY(message_lock_);
204
205 // Set once under |mesage_lock_|, but may be accessed without lock after that.
206 LogCallback log_callback_;
207 };
208
GetLogMessageManager()209 LogMessageManager* GetLogMessageManager() {
210 static base::NoDestructor<LogMessageManager> message_manager;
211 return message_manager.get();
212 }
213
PreInitializeLogHandler(int severity,const char * file,int line,size_t message_start,const std::string & message)214 bool PreInitializeLogHandler(int severity,
215 const char* file,
216 int line,
217 size_t message_start,
218 const std::string& message) {
219 GetLogMessageManager()->AddDeferredMessage(severity,
220 message.substr(0, message_start),
221 message.substr(message_start));
222 return false;
223 }
224
PostInitializeLogHandler(int severity,const char * file,int line,size_t message_start,const std::string & message)225 bool PostInitializeLogHandler(int severity,
226 const char* file,
227 int line,
228 size_t message_start,
229 const std::string& message) {
230 GetLogMessageManager()->RouteMessage(severity,
231 message.substr(0, message_start),
232 message.substr(message_start));
233 return false;
234 }
235
// Whether hardware-accelerated MJPEG decode is available. Only Chrome OS
// builds can provide it; everywhere else this is statically false.
bool IsAcceleratedJpegDecodeSupported() {
#if defined(OS_CHROMEOS)
  return chromeos_camera::GpuMjpegDecodeAcceleratorFactory::
      IsAcceleratedJpegDecodeSupported();
#else
  return false;
#endif  // defined(OS_CHROMEOS)
}
244
// Returns a callback which does a PostTask to run |callback| on the |runner|
// task runner.
template <typename... Params>
base::OnceCallback<void(Params&&...)> WrapCallback(
    scoped_refptr<base::SingleThreadTaskRunner> runner,
    base::OnceCallback<void(Params...)> callback) {
  // The returned wrapper may be invoked on any thread: it binds the original
  // callback together with the arguments into a one-shot closure and posts
  // that to |runner|. base::RetainedRef keeps |runner| alive until the
  // wrapper runs; std::forward preserves move-only arguments.
  return base::BindOnce(
      [](base::SingleThreadTaskRunner* runner,
         base::OnceCallback<void(Params && ...)> callback, Params&&... params) {
        runner->PostTask(FROM_HERE,
                         base::BindOnce(std::move(callback),
                                        std::forward<Params>(params)...));
      },
      base::RetainedRef(std::move(runner)), std::move(callback));
}
260
261 } // namespace
262
// Constructed on the GPU main thread (captured as |main_runner_|). Sets up
// process-wide GPU state: optional Vulkan/Dawn/Metal context providers (which,
// when successfully created, also mark OOP rasterization as supported), the
// optional image decode accelerator worker, and the native
// GpuMemoryBufferFactory. |vulkan_implementation| may be null.
GpuServiceImpl::GpuServiceImpl(
    const gpu::GPUInfo& gpu_info,
    std::unique_ptr<gpu::GpuWatchdogThread> watchdog_thread,
    scoped_refptr<base::SingleThreadTaskRunner> io_runner,
    const gpu::GpuFeatureInfo& gpu_feature_info,
    const gpu::GpuPreferences& gpu_preferences,
    const base::Optional<gpu::GPUInfo>& gpu_info_for_hardware_gpu,
    const base::Optional<gpu::GpuFeatureInfo>&
        gpu_feature_info_for_hardware_gpu,
    const gpu::GpuExtraInfo& gpu_extra_info,
    const base::Optional<gpu::DevicePerfInfo>& device_perf_info,
    gpu::VulkanImplementation* vulkan_implementation,
    base::OnceCallback<void(bool /*immediately*/)> exit_callback)
    : main_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_runner_(std::move(io_runner)),
      watchdog_thread_(std::move(watchdog_thread)),
      gpu_preferences_(gpu_preferences),
      gpu_info_(gpu_info),
      gpu_feature_info_(gpu_feature_info),
      gpu_info_for_hardware_gpu_(gpu_info_for_hardware_gpu),
      gpu_feature_info_for_hardware_gpu_(gpu_feature_info_for_hardware_gpu),
      gpu_extra_info_(gpu_extra_info),
      device_perf_info_(device_perf_info),
#if BUILDFLAG(ENABLE_VULKAN)
      vulkan_implementation_(vulkan_implementation),
#endif
      exit_callback_(std::move(exit_callback)) {
  // Must not be constructed on the IO thread.
  DCHECK(!io_runner_->BelongsToCurrentThread());
  DCHECK(exit_callback_);

#if defined(OS_CHROMEOS)
  protected_buffer_manager_ = new arc::ProtectedBufferManager();
#endif  // defined(OS_CHROMEOS)

  // Size Skia's caches from available system memory before handing
  // |context_options| to any context provider below.
  size_t max_resource_cache_bytes;
  size_t max_glyph_cache_texture_bytes;
  gpu::DetermineGrCacheLimitsFromAvailableMemory(
      &max_resource_cache_bytes, &max_glyph_cache_texture_bytes);
  GrContextOptions context_options;
  context_options.fGlyphCacheTextureMaximumBytes =
      max_glyph_cache_texture_bytes;
  if (gpu_preferences_.force_max_texture_size) {
    context_options.fMaxTextureSizeOverride =
        gpu_preferences_.force_max_texture_size;
  }

#if BUILDFLAG(ENABLE_VULKAN)
  if (vulkan_implementation_) {
    vulkan_context_provider_ = VulkanInProcessContextProvider::Create(
        vulkan_implementation_, context_options);
    if (vulkan_context_provider_) {
      // If Vulkan is supported, then OOP-R is supported.
      gpu_info_.oop_rasterization_supported = true;
      gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
          gpu::kGpuFeatureStatusEnabled;
    } else {
      DLOG(ERROR) << "Failed to create Vulkan context provider.";
    }
  }
#endif

#if BUILDFLAG(SKIA_USE_DAWN)
  if (gpu_preferences_.gr_context_type == gpu::GrContextType::kDawn) {
    dawn_context_provider_ = DawnContextProvider::Create();
    if (dawn_context_provider_) {
      // Like Vulkan, a working Dawn context implies OOP-R support.
      gpu_info_.oop_rasterization_supported = true;
      gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
          gpu::kGpuFeatureStatusEnabled;
    } else {
      DLOG(ERROR) << "Failed to create Dawn context provider.";
    }
  }
#endif

#if BUILDFLAG(USE_VAAPI)
  image_decode_accelerator_worker_ =
      media::VaapiImageDecodeAcceleratorWorker::Create();
#endif

#if defined(OS_MACOSX)
  if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_METAL] ==
      gpu::kGpuFeatureStatusEnabled) {
    metal_context_provider_ = MetalContextProvider::Create(context_options);
  }
#endif

  gpu_memory_buffer_factory_ =
      gpu::GpuMemoryBufferFactory::CreateNativeType(vulkan_context_provider());

  // Cache a weak pointer here (on the thread the factory is bound to) for use
  // by the cross-thread PostTask call sites below.
  weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
}
354
// Teardown runs on the main thread and is strictly ordered: stop accepting
// mojo calls first, then destroy channel managers, scheduler, sync point and
// shared image managers, and finally signal shutdown.
GpuServiceImpl::~GpuServiceImpl() {
  DCHECK(main_runner_->BelongsToCurrentThread());

  // Ensure we don't try to exit when already in the process of exiting.
  is_exiting_.Set();

  bind_task_tracker_.TryCancelAll();
  GetLogMessageManager()->ShutdownLogging();

  // Destroy the receiver on the IO thread.
  base::WaitableEvent wait;
  auto destroy_receiver_task = base::BindOnce(
      [](mojo::Receiver<mojom::GpuService>* receiver,
         base::WaitableEvent* wait) {
        receiver->reset();
        wait->Signal();
      },
      &receiver_, &wait);
  // Block until the IO thread has dropped the receiver so no mojo call can
  // race with the teardown below. If the post fails there is nothing to wait
  // for.
  if (io_runner_->PostTask(FROM_HERE, std::move(destroy_receiver_task)))
    wait.Wait();

  if (watchdog_thread_)
    watchdog_thread_->OnGpuProcessTearDown();

  media_gpu_channel_manager_.reset();
  gpu_channel_manager_.reset();

  // Scheduler must be destroyed before sync point manager is destroyed.
  scheduler_.reset();
  owned_sync_point_manager_.reset();
  owned_shared_image_manager_.reset();

  // The image decode accelerator worker must outlive the GPU channel manager so
  // that it doesn't get any decode requests during/after destruction.
  DCHECK(!gpu_channel_manager_);
  image_decode_accelerator_worker_.reset();

  // Signal this event before destroying the child process. That way all
  // background threads can cleanup. For example, in the renderer the
  // RenderThread instances will be able to notice shutdown before the render
  // process begins waiting for them to exit.
  if (owned_shutdown_event_)
    owned_shutdown_event_->Signal();
}
399
// Fills in the GPUInfo fields that require querying media/accelerator
// capabilities. Must run on the main thread, before InitializeWithHost()
// binds |gpu_host_| (the info is reported to the host there).
void GpuServiceImpl::UpdateGPUInfo() {
  DCHECK(main_runner_->BelongsToCurrentThread());
  DCHECK(!gpu_host_);
  gpu::GpuDriverBugWorkarounds gpu_workarounds(
      gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
  gpu_info_.video_decode_accelerator_capabilities =
      media::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_,
                                                        gpu_workarounds);
  gpu_info_.video_encode_accelerator_supported_profiles =
      media::GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(
          media::GpuVideoEncodeAcceleratorFactory::GetSupportedProfiles(
              gpu_preferences_));
  gpu_info_.jpeg_decode_accelerator_supported =
      IsAcceleratedJpegDecodeSupported();

  if (image_decode_accelerator_worker_) {
    gpu_info_.image_decode_accelerator_supported_profiles =
        image_decode_accelerator_worker_->GetSupportedProfiles();
  }

  // Record initialization only after collecting the GPU info because that can
  // take a significant amount of time.
  gpu_info_.initialization_time = base::Time::Now() - start_time_;
}
424
// Connects this service to its browser-side GpuHost, reports the collected
// GPU info, and creates the scheduler and channel managers. The
// sync point / shared image managers and shutdown event may be supplied by
// the caller (in-process mode); otherwise owned instances are created here.
void GpuServiceImpl::InitializeWithHost(
    mojo::PendingRemote<mojom::GpuHost> pending_gpu_host,
    gpu::GpuProcessActivityFlags activity_flags,
    scoped_refptr<gl::GLSurface> default_offscreen_surface,
    gpu::SyncPointManager* sync_point_manager,
    gpu::SharedImageManager* shared_image_manager,
    base::WaitableEvent* shutdown_event) {
  DCHECK(main_runner_->BelongsToCurrentThread());

  mojo::Remote<mojom::GpuHost> gpu_host(std::move(pending_gpu_host));
  gpu_host->DidInitialize(gpu_info_, gpu_feature_info_,
                          gpu_info_for_hardware_gpu_,
                          gpu_feature_info_for_hardware_gpu_, gpu_extra_info_);
  // Rebind as a SharedRemote on the IO thread so other threads can call it.
  gpu_host_ = mojo::SharedRemote<mojom::GpuHost>(gpu_host.Unbind(), io_runner_);
  if (!in_host_process()) {
    // The global callback is reset from the dtor. So Unretained() here is safe.
    // Note that the callback can be called from any thread. Consequently, the
    // callback cannot use a WeakPtr.
    GetLogMessageManager()->InstallPostInitializeLogHandler(base::BindRepeating(
        &GpuServiceImpl::RecordLogMessage, base::Unretained(this)));
  }

  if (!sync_point_manager) {
    owned_sync_point_manager_ = std::make_unique<gpu::SyncPointManager>();
    sync_point_manager = owned_sync_point_manager_.get();
  }

  if (!shared_image_manager) {
    // When using real buffers for testing overlay configurations, we need
    // access to SharedImageManager on the viz thread to obtain the buffer
    // corresponding to a mailbox.
    bool thread_safe_manager = features::ShouldUseRealBuffersForPageFlipTest();
    owned_shared_image_manager_ = std::make_unique<gpu::SharedImageManager>(
        thread_safe_manager, false /* display_context_on_another_thread */);
    shared_image_manager = owned_shared_image_manager_.get();
  } else {
    // With this feature enabled, we don't expect to receive an external
    // SharedImageManager.
    DCHECK(!features::ShouldUseRealBuffersForPageFlipTest());
  }

  shutdown_event_ = shutdown_event;
  if (!shutdown_event_) {
    owned_shutdown_event_ = std::make_unique<base::WaitableEvent>(
        base::WaitableEvent::ResetPolicy::MANUAL,
        base::WaitableEvent::InitialState::NOT_SIGNALED);
    shutdown_event_ = owned_shutdown_event_.get();
  }

  scheduler_ = std::make_unique<gpu::Scheduler>(
      main_runner_, sync_point_manager, gpu_preferences_);

  // Defer creation of the render thread. This is to prevent it from handling
  // IPC messages before the sandbox has been enabled and all other necessary
  // initialization has succeeded.
  gpu_channel_manager_ = std::make_unique<gpu::GpuChannelManager>(
      gpu_preferences_, this, watchdog_thread_.get(), main_runner_, io_runner_,
      scheduler_.get(), sync_point_manager, shared_image_manager,
      gpu_memory_buffer_factory_.get(), gpu_feature_info_,
      std::move(activity_flags), std::move(default_offscreen_surface),
      image_decode_accelerator_worker_.get(), vulkan_context_provider(),
      metal_context_provider_.get(), dawn_context_provider());

  media_gpu_channel_manager_.reset(
      new media::MediaGpuChannelManager(gpu_channel_manager_.get()));
  if (watchdog_thread())
    watchdog_thread()->AddPowerObserver();
}
493
// Binds the GpuService mojo receiver. The receiver must live on the IO
// thread, so a call arriving on the main thread is re-posted there via
// |bind_task_tracker_| (which lets the destructor cancel pending binds).
void GpuServiceImpl::Bind(
    mojo::PendingReceiver<mojom::GpuService> pending_receiver) {
  if (main_runner_->BelongsToCurrentThread()) {
    bind_task_tracker_.PostTask(
        io_runner_.get(), FROM_HERE,
        base::BindOnce(&GpuServiceImpl::Bind, base::Unretained(this),
                       std::move(pending_receiver)));
    return;
  }
  // Only a single receiver is supported.
  DCHECK(!receiver_.is_bound());
  receiver_.Bind(std::move(pending_receiver));
}
506
// Notifies the browser-side GpuHost that GPU compositing should be disabled.
void GpuServiceImpl::DisableGpuCompositing() {
  // Can be called from any thread.
  gpu_host_->DisableGpuCompositing();
}
511
// Returns the shared context state from the channel manager; may be null on
// failure.
scoped_refptr<gpu::SharedContextState> GpuServiceImpl::GetContextState() {
  DCHECK(main_runner_->BelongsToCurrentThread());
  // |result| only satisfies GetSharedContextState()'s signature here; failure
  // is conveyed to our caller via the returned pointer.
  gpu::ContextResult result;
  return gpu_channel_manager_->GetSharedContextState(&result);
}
517
gpu_image_factory()518 gpu::ImageFactory* GpuServiceImpl::gpu_image_factory() {
519 return gpu_memory_buffer_factory_
520 ? gpu_memory_buffer_factory_->AsImageFactory()
521 : nullptr;
522 }
523
// static
// Starts capturing LOG() output before any GpuServiceImpl instance (and its
// GpuHost connection) exists; messages are deferred until InitializeWithHost().
void GpuServiceImpl::InstallPreInitializeLogHandler() {
  GetLogMessageManager()->InstallPreInitializeLogHandler();
}
528
// static
// Forwards all deferred LOG() messages to |gpu_host| when InitializeWithHost()
// will never run (e.g. early process exit).
void GpuServiceImpl::FlushPreInitializeLogMessages(mojom::GpuHost* gpu_host) {
  GetLogMessageManager()->FlushMessages(gpu_host);
}
533
RecordLogMessage(int severity,const std::string & header,const std::string & message)534 void GpuServiceImpl::RecordLogMessage(int severity,
535 const std::string& header,
536 const std::string& message) {
537 // This can be run from any thread.
538 gpu_host_->RecordLogMessage(severity, std::move(header), std::move(message));
539 }
540
541 #if defined(OS_CHROMEOS)
// Mojo entry point (IO thread): bounces ARC video decode accelerator creation
// to the main thread, where the accelerator must live.
void GpuServiceImpl::CreateArcVideoDecodeAccelerator(
    mojo::PendingReceiver<arc::mojom::VideoDecodeAccelerator> vda_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(
          &GpuServiceImpl::CreateArcVideoDecodeAcceleratorOnMainThread,
          weak_ptr_, std::move(vda_receiver)));
}
551
// Mojo entry point (IO thread): bounces ARC video encode accelerator creation
// to the main thread.
void GpuServiceImpl::CreateArcVideoEncodeAccelerator(
    mojo::PendingReceiver<arc::mojom::VideoEncodeAccelerator> vea_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(
          &GpuServiceImpl::CreateArcVideoEncodeAcceleratorOnMainThread,
          weak_ptr_, std::move(vea_receiver)));
}
561
// Mojo entry point (IO thread): bounces protected buffer allocator creation
// to the main thread.
void GpuServiceImpl::CreateArcVideoProtectedBufferAllocator(
    mojo::PendingReceiver<arc::mojom::VideoProtectedBufferAllocator>
        pba_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(
          &GpuServiceImpl::CreateArcVideoProtectedBufferAllocatorOnMainThread,
          weak_ptr_, std::move(pba_receiver)));
}
572
// Mojo entry point (IO thread): bounces protected buffer manager proxy
// creation to the main thread.
void GpuServiceImpl::CreateArcProtectedBufferManager(
    mojo::PendingReceiver<arc::mojom::ProtectedBufferManager> pbm_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(
          &GpuServiceImpl::CreateArcProtectedBufferManagerOnMainThread,
          weak_ptr_, std::move(pbm_receiver)));
}
582
// Main-thread half of CreateArcVideoDecodeAccelerator(). The accelerator is
// self-owned: it lives for as long as its mojo connection.
void GpuServiceImpl::CreateArcVideoDecodeAcceleratorOnMainThread(
    mojo::PendingReceiver<arc::mojom::VideoDecodeAccelerator> vda_receiver) {
  DCHECK(main_runner_->BelongsToCurrentThread());
  mojo::MakeSelfOwnedReceiver(
      std::make_unique<arc::GpuArcVideoDecodeAccelerator>(
          gpu_preferences_, protected_buffer_manager_),
      std::move(vda_receiver));
}
591
// Main-thread half of CreateArcVideoEncodeAccelerator(); the accelerator is
// self-owned by its mojo connection.
void GpuServiceImpl::CreateArcVideoEncodeAcceleratorOnMainThread(
    mojo::PendingReceiver<arc::mojom::VideoEncodeAccelerator> vea_receiver) {
  DCHECK(main_runner_->BelongsToCurrentThread());
  mojo::MakeSelfOwnedReceiver(
      std::make_unique<arc::GpuArcVideoEncodeAccelerator>(gpu_preferences_),
      std::move(vea_receiver));
}
599
// Main-thread half of CreateArcVideoProtectedBufferAllocator().
void GpuServiceImpl::CreateArcVideoProtectedBufferAllocatorOnMainThread(
    mojo::PendingReceiver<arc::mojom::VideoProtectedBufferAllocator>
        pba_receiver) {
  DCHECK(main_runner_->BelongsToCurrentThread());
  auto gpu_arc_video_protected_buffer_allocator =
      arc::GpuArcVideoProtectedBufferAllocator::Create(
          protected_buffer_manager_);
  // Creation can fail; the pending receiver is then dropped, which surfaces
  // as a disconnection on the remote end.
  if (!gpu_arc_video_protected_buffer_allocator)
    return;
  mojo::MakeSelfOwnedReceiver(
      std::move(gpu_arc_video_protected_buffer_allocator),
      std::move(pba_receiver));
}
613
// Main-thread half of CreateArcProtectedBufferManager(); wraps
// |protected_buffer_manager_| in a self-owned mojo proxy.
void GpuServiceImpl::CreateArcProtectedBufferManagerOnMainThread(
    mojo::PendingReceiver<arc::mojom::ProtectedBufferManager> pbm_receiver) {
  DCHECK(main_runner_->BelongsToCurrentThread());
  mojo::MakeSelfOwnedReceiver(
      std::make_unique<arc::GpuArcProtectedBufferManagerProxy>(
          protected_buffer_manager_),
      std::move(pbm_receiver));
}
622
// Creates the MJPEG decode accelerator service directly on the IO thread (no
// main-thread hop needed).
void GpuServiceImpl::CreateJpegDecodeAccelerator(
    mojo::PendingReceiver<chromeos_camera::mojom::MjpegDecodeAccelerator>
        jda_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  chromeos_camera::MojoMjpegDecodeAcceleratorService::Create(
      std::move(jda_receiver));
}
630
// Creates the JPEG encode accelerator service directly on the IO thread.
void GpuServiceImpl::CreateJpegEncodeAccelerator(
    mojo::PendingReceiver<chromeos_camera::mojom::JpegEncodeAccelerator>
        jea_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  chromeos_camera::MojoJpegEncodeAcceleratorService::Create(
      std::move(jea_receiver));
}
638 #endif // defined(OS_CHROMEOS)
639
// Creates the video encode accelerator provider on the IO thread. The
// provider gets a factory callback so each encoder request can construct a
// fresh VEA with the current GPU preferences.
void GpuServiceImpl::CreateVideoEncodeAcceleratorProvider(
    mojo::PendingReceiver<media::mojom::VideoEncodeAcceleratorProvider>
        vea_provider_receiver) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  media::MojoVideoEncodeAcceleratorProvider::Create(
      std::move(vea_provider_receiver),
      base::BindRepeating(&media::GpuVideoEncodeAcceleratorFactory::CreateVEA),
      gpu_preferences_);
}
649
// Asynchronously creates a native GpuMemoryBuffer for |client_id|; the handle
// is returned through |callback|.
void GpuServiceImpl::CreateGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    const gfx::Size& size,
    gfx::BufferFormat format,
    gfx::BufferUsage usage,
    int client_id,
    gpu::SurfaceHandle surface_handle,
    CreateGpuMemoryBufferCallback callback) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  // This needs to happen in the IO thread.
  gpu_memory_buffer_factory_->CreateGpuMemoryBufferAsync(
      id, size, format, usage, client_id, surface_handle, std::move(callback));
}
663
// Destroys the buffer identified by |id| once |sync_token| is released. A
// call arriving on the IO thread is re-posted to the main thread, where
// |gpu_channel_manager_| lives.
void GpuServiceImpl::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
                                            int client_id,
                                            const gpu::SyncToken& sync_token) {
  if (io_runner_->BelongsToCurrentThread()) {
    main_runner_->PostTask(
        FROM_HERE, base::BindOnce(&GpuServiceImpl::DestroyGpuMemoryBuffer,
                                  weak_ptr_, id, client_id, sync_token));
    return;
  }
  gpu_channel_manager_->DestroyGpuMemoryBuffer(id, client_id, sync_token);
}
675
// Collects video memory usage stats on the main thread. When invoked on the
// IO thread, re-posts itself with |callback| wrapped so the reply is bounced
// back to the IO thread.
void GpuServiceImpl::GetVideoMemoryUsageStats(
    GetVideoMemoryUsageStatsCallback callback) {
  if (io_runner_->BelongsToCurrentThread()) {
    auto wrap_callback = WrapCallback(io_runner_, std::move(callback));
    main_runner_->PostTask(
        FROM_HERE, base::BindOnce(&GpuServiceImpl::GetVideoMemoryUsageStats,
                                  weak_ptr_, std::move(wrap_callback)));
    return;
  }
  gpu::VideoMemoryUsageStats video_memory_usage_stats;
  gpu_channel_manager_->GetVideoMemoryUsageStats(&video_memory_usage_stats);
  std::move(callback).Run(video_memory_usage_stats);
}
689
// Begins tracking peak GPU memory for |sequence_num|; the bookkeeping runs on
// the main thread.
void GpuServiceImpl::StartPeakMemoryMonitor(uint32_t sequence_num) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&GpuServiceImpl::StartPeakMemoryMonitorOnMainThread,
                     weak_ptr_, sequence_num));
}
697
// Reports (and ends) the peak-memory tracking for |sequence_num|; forwarded
// to the main thread, which replies through |callback|.
void GpuServiceImpl::GetPeakMemoryUsage(uint32_t sequence_num,
                                        GetPeakMemoryUsageCallback callback) {
  DCHECK(io_runner_->BelongsToCurrentThread());
  main_runner_->PostTask(
      FROM_HERE, base::BindOnce(&GpuServiceImpl::GetPeakMemoryUsageOnMainThread,
                                weak_ptr_, sequence_num, std::move(callback)));
}
705
706 #if defined(OS_WIN)
// Windows-only: reports DX12/Vulkan runtime version info plus device perf
// info. Hops IO -> main thread, with the reply routed back to the IO thread
// via WrapCallback().
void GpuServiceImpl::GetGpuSupportedRuntimeVersionAndDevicePerfInfo(
    GetGpuSupportedRuntimeVersionAndDevicePerfInfoCallback callback) {
  if (io_runner_->BelongsToCurrentThread()) {
    auto wrap_callback = WrapCallback(io_runner_, std::move(callback));
    main_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(
            &GpuServiceImpl::GetGpuSupportedRuntimeVersionAndDevicePerfInfo,
            weak_ptr_, std::move(wrap_callback)));
    return;
  }
  DCHECK(main_runner_->BelongsToCurrentThread());

  // GPU full info collection should only happen on un-sandboxed GPU process
  // or single process/in-process gpu mode on Windows.
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  DCHECK(command_line->HasSwitch("disable-gpu-sandbox") || in_host_process());

  gpu::RecordGpuSupportedRuntimeVersionHistograms(
      &gpu_info_.dx12_vulkan_version_info);
  // |device_perf_info_| is expected to have been collected at startup on
  // Windows before this is called.
  DCHECK(device_perf_info_.has_value());
  std::move(callback).Run(gpu_info_.dx12_vulkan_version_info,
                          device_perf_info_.value());
}
731
RequestCompleteGpuInfo(RequestCompleteGpuInfoCallback callback)732 void GpuServiceImpl::RequestCompleteGpuInfo(
733 RequestCompleteGpuInfoCallback callback) {
734 if (io_runner_->BelongsToCurrentThread()) {
735 auto wrap_callback = WrapCallback(io_runner_, std::move(callback));
736 main_runner_->PostTask(
737 FROM_HERE, base::BindOnce(&GpuServiceImpl::RequestCompleteGpuInfo,
738 weak_ptr_, std::move(wrap_callback)));
739 return;
740 }
741 DCHECK(main_runner_->BelongsToCurrentThread());
742
743 UpdateGpuInfoPlatform(base::BindOnce(
744 IgnoreResult(&base::TaskRunner::PostTask), main_runner_, FROM_HERE,
745 base::BindOnce(
746 [](GpuServiceImpl* gpu_service,
747 RequestCompleteGpuInfoCallback callback) {
748 std::move(callback).Run(gpu_service->gpu_info_.dx_diagnostics);
749 },
750 this, std::move(callback))));
751 }
752 #endif
753
RequestHDRStatus(RequestHDRStatusCallback callback)754 void GpuServiceImpl::RequestHDRStatus(RequestHDRStatusCallback callback) {
755 DCHECK(io_runner_->BelongsToCurrentThread());
756 main_runner_->PostTask(
757 FROM_HERE, base::BindOnce(&GpuServiceImpl::RequestHDRStatusOnMainThread,
758 weak_ptr_, std::move(callback)));
759 }
760
RequestHDRStatusOnMainThread(RequestHDRStatusCallback callback)761 void GpuServiceImpl::RequestHDRStatusOnMainThread(
762 RequestHDRStatusCallback callback) {
763 DCHECK(main_runner_->BelongsToCurrentThread());
764 bool hdr_enabled = false;
765 #if defined(OS_WIN)
766 hdr_enabled = gl::DirectCompositionSurfaceWin::IsHDRSupported();
767 #endif
768 io_runner_->PostTask(FROM_HERE,
769 base::BindOnce(std::move(callback), hdr_enabled));
770 }
771
772 #if defined(OS_WIN)
UpdateGpuInfoPlatform(base::OnceClosure on_gpu_info_updated)773 void GpuServiceImpl::UpdateGpuInfoPlatform(
774 base::OnceClosure on_gpu_info_updated) {
775 DCHECK(main_runner_->BelongsToCurrentThread());
776 // GPU full info collection should only happen on un-sandboxed GPU process
777 // or single process/in-process gpu mode on Windows.
778 base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
779 DCHECK(command_line->HasSwitch("disable-gpu-sandbox") || in_host_process());
780
781 // We can continue on shutdown here because we're not writing any critical
782 // state in this task.
783 base::PostTaskAndReplyWithResult(
784 base::ThreadPool::CreateCOMSTATaskRunner(
785 {base::TaskPriority::USER_VISIBLE,
786 base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN})
787 .get(),
788 FROM_HERE, base::BindOnce([]() {
789 gpu::DxDiagNode dx_diag_node;
790 gpu::GetDxDiagnostics(&dx_diag_node);
791 return dx_diag_node;
792 }),
793 base::BindOnce(
794 [](GpuServiceImpl* gpu_service, base::OnceClosure on_gpu_info_updated,
795 const gpu::DxDiagNode& dx_diag_node) {
796 gpu_service->gpu_info_.dx_diagnostics = dx_diag_node;
797 std::move(on_gpu_info_updated).Run();
798 },
799 this, std::move(on_gpu_info_updated)));
800 }
801 #else
UpdateGpuInfoPlatform(base::OnceClosure on_gpu_info_updated)802 void GpuServiceImpl::UpdateGpuInfoPlatform(
803 base::OnceClosure on_gpu_info_updated) {
804 std::move(on_gpu_info_updated).Run();
805 }
806 #endif
807
RegisterDisplayContext(gpu::DisplayContext * display_context)808 void GpuServiceImpl::RegisterDisplayContext(
809 gpu::DisplayContext* display_context) {
810 DCHECK(main_runner_->BelongsToCurrentThread());
811 display_contexts_.AddObserver(display_context);
812 }
813
UnregisterDisplayContext(gpu::DisplayContext * display_context)814 void GpuServiceImpl::UnregisterDisplayContext(
815 gpu::DisplayContext* display_context) {
816 DCHECK(main_runner_->BelongsToCurrentThread());
817 display_contexts_.RemoveObserver(display_context);
818 }
819
LoseAllContexts()820 void GpuServiceImpl::LoseAllContexts() {
821 DCHECK(main_runner_->BelongsToCurrentThread());
822
823 if (IsExiting())
824 return;
825
826 for (auto& display_context : display_contexts_)
827 display_context.MarkContextLost();
828 gpu_channel_manager_->LoseAllContexts();
829 }
830
DidCreateContextSuccessfully()831 void GpuServiceImpl::DidCreateContextSuccessfully() {
832 DCHECK(main_runner_->BelongsToCurrentThread());
833 gpu_host_->DidCreateContextSuccessfully();
834 }
835
DidCreateOffscreenContext(const GURL & active_url)836 void GpuServiceImpl::DidCreateOffscreenContext(const GURL& active_url) {
837 DCHECK(main_runner_->BelongsToCurrentThread());
838 gpu_host_->DidCreateOffscreenContext(active_url);
839 }
840
DidDestroyChannel(int client_id)841 void GpuServiceImpl::DidDestroyChannel(int client_id) {
842 DCHECK(main_runner_->BelongsToCurrentThread());
843 media_gpu_channel_manager_->RemoveChannel(client_id);
844 gpu_host_->DidDestroyChannel(client_id);
845 }
846
DidDestroyAllChannels()847 void GpuServiceImpl::DidDestroyAllChannels() {
848 DCHECK(main_runner_->BelongsToCurrentThread());
849 gpu_host_->DidDestroyAllChannels();
850 }
851
DidDestroyOffscreenContext(const GURL & active_url)852 void GpuServiceImpl::DidDestroyOffscreenContext(const GURL& active_url) {
853 DCHECK(main_runner_->BelongsToCurrentThread());
854 gpu_host_->DidDestroyOffscreenContext(active_url);
855 }
856
DidLoseContext(bool offscreen,gpu::error::ContextLostReason reason,const GURL & active_url)857 void GpuServiceImpl::DidLoseContext(bool offscreen,
858 gpu::error::ContextLostReason reason,
859 const GURL& active_url) {
860 DCHECK(main_runner_->BelongsToCurrentThread());
861 gpu_host_->DidLoseContext(offscreen, reason, active_url);
862 }
863
864 #if defined(OS_WIN)
DidUpdateOverlayInfo(const gpu::OverlayInfo & overlay_info)865 void GpuServiceImpl::DidUpdateOverlayInfo(
866 const gpu::OverlayInfo& overlay_info) {
867 gpu_host_->DidUpdateOverlayInfo(gpu_info_.overlay_info);
868 }
869 #endif
870
StoreShaderToDisk(int client_id,const std::string & key,const std::string & shader)871 void GpuServiceImpl::StoreShaderToDisk(int client_id,
872 const std::string& key,
873 const std::string& shader) {
874 DCHECK(main_runner_->BelongsToCurrentThread());
875 gpu_host_->StoreShaderToDisk(client_id, key, shader);
876 }
877
MaybeExitOnContextLost()878 void GpuServiceImpl::MaybeExitOnContextLost() {
879 MaybeExit(true);
880 }
881
IsExiting() const882 bool GpuServiceImpl::IsExiting() const {
883 return is_exiting_.IsSet();
884 }
885
886 #if defined(OS_WIN)
SendCreatedChildWindow(gpu::SurfaceHandle parent_window,gpu::SurfaceHandle child_window)887 void GpuServiceImpl::SendCreatedChildWindow(gpu::SurfaceHandle parent_window,
888 gpu::SurfaceHandle child_window) {
889 // This can be called from main or display compositor thread.
890 gpu_host_->SetChildSurface(parent_window, child_window);
891 }
892 #endif
893
EstablishGpuChannel(int32_t client_id,uint64_t client_tracing_id,bool is_gpu_host,bool cache_shaders_on_disk,EstablishGpuChannelCallback callback)894 void GpuServiceImpl::EstablishGpuChannel(int32_t client_id,
895 uint64_t client_tracing_id,
896 bool is_gpu_host,
897 bool cache_shaders_on_disk,
898 EstablishGpuChannelCallback callback) {
899 // This should always be called on the IO thread first.
900 if (io_runner_->BelongsToCurrentThread()) {
901 if (IsExiting()) {
902 // We are already exiting so there is no point in responding. Close the
903 // receiver so we can safely drop the callback.
904 receiver_.reset();
905 return;
906 }
907
908 if (gpu::IsReservedClientId(client_id)) {
909 // This returns a null handle, which is treated by the client as a failure
910 // case.
911 std::move(callback).Run(mojo::ScopedMessagePipeHandle());
912 return;
913 }
914
915 EstablishGpuChannelCallback wrap_callback = base::BindOnce(
916 [](scoped_refptr<base::SingleThreadTaskRunner> runner,
917 EstablishGpuChannelCallback cb,
918 mojo::ScopedMessagePipeHandle handle) {
919 runner->PostTask(FROM_HERE,
920 base::BindOnce(std::move(cb), std::move(handle)));
921 },
922 io_runner_, std::move(callback));
923 main_runner_->PostTask(
924 FROM_HERE,
925 base::BindOnce(&GpuServiceImpl::EstablishGpuChannel, weak_ptr_,
926 client_id, client_tracing_id, is_gpu_host,
927 cache_shaders_on_disk, std::move(wrap_callback)));
928 return;
929 }
930
931 gpu::GpuChannel* gpu_channel = gpu_channel_manager_->EstablishChannel(
932 client_id, client_tracing_id, is_gpu_host, cache_shaders_on_disk);
933
934 if (!gpu_channel) {
935 // This returns a null handle, which is treated by the client as a failure
936 // case.
937 std::move(callback).Run(mojo::ScopedMessagePipeHandle());
938 return;
939 }
940 mojo::MessagePipe pipe;
941 gpu_channel->Init(pipe.handle0.release(), shutdown_event_);
942
943 media_gpu_channel_manager_->AddChannel(client_id);
944
945 std::move(callback).Run(std::move(pipe.handle1));
946 }
947
CloseChannel(int32_t client_id)948 void GpuServiceImpl::CloseChannel(int32_t client_id) {
949 if (io_runner_->BelongsToCurrentThread()) {
950 main_runner_->PostTask(
951 FROM_HERE,
952 base::BindOnce(&GpuServiceImpl::CloseChannel, weak_ptr_, client_id));
953 return;
954 }
955 gpu_channel_manager_->RemoveChannel(client_id);
956 }
957
LoadedShader(int32_t client_id,const std::string & key,const std::string & data)958 void GpuServiceImpl::LoadedShader(int32_t client_id,
959 const std::string& key,
960 const std::string& data) {
961 if (io_runner_->BelongsToCurrentThread()) {
962 main_runner_->PostTask(
963 FROM_HERE, base::BindOnce(&GpuServiceImpl::LoadedShader, weak_ptr_,
964 client_id, key, data));
965 return;
966 }
967 gpu_channel_manager_->PopulateShaderCache(client_id, key, data);
968 }
969
WakeUpGpu()970 void GpuServiceImpl::WakeUpGpu() {
971 if (io_runner_->BelongsToCurrentThread()) {
972 main_runner_->PostTask(
973 FROM_HERE, base::BindOnce(&GpuServiceImpl::WakeUpGpu, weak_ptr_));
974 return;
975 }
976 #if defined(OS_ANDROID)
977 gpu_channel_manager_->WakeUpGpu();
978 #else
979 NOTREACHED() << "WakeUpGpu() not supported on this platform.";
980 #endif
981 }
982
GpuSwitched(gl::GpuPreference active_gpu_heuristic)983 void GpuServiceImpl::GpuSwitched(gl::GpuPreference active_gpu_heuristic) {
984 DVLOG(1) << "GPU: GPU has switched";
985 if (!in_host_process())
986 ui::GpuSwitchingManager::GetInstance()->NotifyGpuSwitched(
987 active_gpu_heuristic);
988 }
989
DisplayAdded()990 void GpuServiceImpl::DisplayAdded() {
991 if (io_runner_->BelongsToCurrentThread()) {
992 main_runner_->PostTask(
993 FROM_HERE, base::BindOnce(&GpuServiceImpl::DisplayAdded, weak_ptr_));
994 return;
995 }
996 DVLOG(1) << "GPU: A monitor is plugged in";
997
998 if (!in_host_process())
999 ui::GpuSwitchingManager::GetInstance()->NotifyDisplayAdded();
1000
1001 #if defined(OS_WIN)
1002 // Update overlay info in the GPU process and send the updated data back to
1003 // the GPU host in the Browser process through mojom if the info has changed.
1004 UpdateOverlayInfo();
1005 #endif
1006 }
1007
DisplayRemoved()1008 void GpuServiceImpl::DisplayRemoved() {
1009 if (io_runner_->BelongsToCurrentThread()) {
1010 main_runner_->PostTask(
1011 FROM_HERE, base::BindOnce(&GpuServiceImpl::DisplayRemoved, weak_ptr_));
1012 return;
1013 }
1014 DVLOG(1) << "GPU: A monitor is unplugged ";
1015
1016 if (!in_host_process())
1017 ui::GpuSwitchingManager::GetInstance()->NotifyDisplayRemoved();
1018
1019 #if defined(OS_WIN)
1020 // Update overlay info in the GPU process and send the updated data back to
1021 // the GPU host in the Browser process through mojom if the info has changed.
1022 UpdateOverlayInfo();
1023 #endif
1024 }
1025
DestroyAllChannels()1026 void GpuServiceImpl::DestroyAllChannels() {
1027 if (io_runner_->BelongsToCurrentThread()) {
1028 main_runner_->PostTask(
1029 FROM_HERE,
1030 base::BindOnce(&GpuServiceImpl::DestroyAllChannels, weak_ptr_));
1031 return;
1032 }
1033 DVLOG(1) << "GPU: Removing all contexts";
1034 gpu_channel_manager_->DestroyAllChannels();
1035 }
1036
OnBackgroundCleanup()1037 void GpuServiceImpl::OnBackgroundCleanup() {
1038 // Currently only called on Android.
1039 #if defined(OS_ANDROID)
1040 if (io_runner_->BelongsToCurrentThread()) {
1041 main_runner_->PostTask(
1042 FROM_HERE,
1043 base::BindOnce(&GpuServiceImpl::OnBackgroundCleanup, weak_ptr_));
1044 return;
1045 }
1046 DVLOG(1) << "GPU: Performing background cleanup";
1047 gpu_channel_manager_->OnBackgroundCleanup();
1048 #else
1049 NOTREACHED();
1050 #endif
1051 }
1052
OnBackgrounded()1053 void GpuServiceImpl::OnBackgrounded() {
1054 DCHECK(io_runner_->BelongsToCurrentThread());
1055 if (watchdog_thread_)
1056 watchdog_thread_->OnBackgrounded();
1057
1058 main_runner_->PostTask(
1059 FROM_HERE,
1060 base::BindOnce(&GpuServiceImpl::OnBackgroundedOnMainThread, weak_ptr_));
1061 }
1062
OnBackgroundedOnMainThread()1063 void GpuServiceImpl::OnBackgroundedOnMainThread() {
1064 gpu_channel_manager_->OnApplicationBackgrounded();
1065 }
1066
OnForegrounded()1067 void GpuServiceImpl::OnForegrounded() {
1068 if (watchdog_thread_)
1069 watchdog_thread_->OnForegrounded();
1070 }
1071
1072 #if !defined(OS_ANDROID)
OnMemoryPressure(::base::MemoryPressureListener::MemoryPressureLevel level)1073 void GpuServiceImpl::OnMemoryPressure(
1074 ::base::MemoryPressureListener::MemoryPressureLevel level) {
1075 // Forward the notification to the registry of MemoryPressureListeners.
1076 base::MemoryPressureListener::NotifyMemoryPressure(level);
1077 }
1078 #endif
1079
1080 #if defined(OS_MACOSX)
BeginCATransaction()1081 void GpuServiceImpl::BeginCATransaction() {
1082 DCHECK(io_runner_->BelongsToCurrentThread());
1083 main_runner_->PostTask(FROM_HERE, base::BindOnce(&ui::BeginCATransaction));
1084 }
1085
CommitCATransaction(CommitCATransactionCallback callback)1086 void GpuServiceImpl::CommitCATransaction(CommitCATransactionCallback callback) {
1087 DCHECK(io_runner_->BelongsToCurrentThread());
1088 main_runner_->PostTaskAndReply(FROM_HERE,
1089 base::BindOnce(&ui::CommitCATransaction),
1090 WrapCallback(io_runner_, std::move(callback)));
1091 }
1092 #endif
1093
Crash()1094 void GpuServiceImpl::Crash() {
1095 DCHECK(io_runner_->BelongsToCurrentThread());
1096 gl::Crash();
1097 }
1098
Hang()1099 void GpuServiceImpl::Hang() {
1100 DCHECK(io_runner_->BelongsToCurrentThread());
1101 main_runner_->PostTask(FROM_HERE, base::BindOnce(&gl::Hang));
1102 }
1103
ThrowJavaException()1104 void GpuServiceImpl::ThrowJavaException() {
1105 DCHECK(io_runner_->BelongsToCurrentThread());
1106 #if defined(OS_ANDROID)
1107 ThrowUncaughtException();
1108 #else
1109 NOTREACHED() << "Java exception not supported on this platform.";
1110 #endif
1111 }
1112
Stop(StopCallback callback)1113 void GpuServiceImpl::Stop(StopCallback callback) {
1114 DCHECK(io_runner_->BelongsToCurrentThread());
1115 main_runner_->PostTaskAndReply(
1116 FROM_HERE, base::BindOnce(&GpuServiceImpl::MaybeExit, weak_ptr_, false),
1117 std::move(callback));
1118 }
1119
StartPeakMemoryMonitorOnMainThread(uint32_t sequence_num)1120 void GpuServiceImpl::StartPeakMemoryMonitorOnMainThread(uint32_t sequence_num) {
1121 gpu_channel_manager_->StartPeakMemoryMonitor(sequence_num);
1122 }
1123
GetPeakMemoryUsageOnMainThread(uint32_t sequence_num,GetPeakMemoryUsageCallback callback)1124 void GpuServiceImpl::GetPeakMemoryUsageOnMainThread(
1125 uint32_t sequence_num,
1126 GetPeakMemoryUsageCallback callback) {
1127 uint64_t peak_memory = 0u;
1128 auto allocation_per_source =
1129 gpu_channel_manager_->GetPeakMemoryUsage(sequence_num, &peak_memory);
1130 io_runner_->PostTask(FROM_HERE,
1131 base::BindOnce(std::move(callback), peak_memory,
1132 std::move(allocation_per_source)));
1133 }
1134
MaybeExit(bool for_context_loss)1135 void GpuServiceImpl::MaybeExit(bool for_context_loss) {
1136 DCHECK(main_runner_->BelongsToCurrentThread());
1137
1138 // We can't restart the GPU process when running in the host process.
1139 if (in_host_process())
1140 return;
1141
1142 if (IsExiting() || !exit_callback_)
1143 return;
1144
1145 if (for_context_loss) {
1146 LOG(ERROR) << "Exiting GPU process because some drivers can't recover "
1147 "from errors. GPU process will restart shortly.";
1148 }
1149 is_exiting_.Set();
1150 // For the unsandboxed GPU info collection process used for info collection,
1151 // if we exit immediately, then the reply message could be lost. That's why
1152 // the |exit_callback_| takes the boolean argument.
1153 std::move(exit_callback_).Run(/*immediately=*/for_context_loss);
1154 }
1155
GetGpuScheduler()1156 gpu::Scheduler* GpuServiceImpl::GetGpuScheduler() {
1157 return scheduler_.get();
1158 }
1159
1160 #if defined(OS_WIN)
UpdateOverlayInfo()1161 void GpuServiceImpl::UpdateOverlayInfo() {
1162 gpu::OverlayInfo old_overlay_info = gpu_info_.overlay_info;
1163 gpu::CollectHardwareOverlayInfo(&gpu_info_.overlay_info);
1164
1165 if (old_overlay_info != gpu_info_.overlay_info)
1166 DidUpdateOverlayInfo(gpu_info_.overlay_info);
1167 }
1168 #endif
1169
1170 } // namespace viz
1171