// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/service/gpu_channel_manager.h"

#include <algorithm>
#include <utility>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/metrics/histogram_macros.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "components/viz/common/features.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/passthrough_program_cache.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/memory_stats.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "third_party/skia/include/core/SkGraphics.h"
#if defined(OS_WIN)
#include "ui/gl/gl_angle_util_win.h"
#endif
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/gl_factory.h"

namespace gpu {

namespace {
#if defined(OS_ANDROID)
// Amount of time we expect the GPU to stay powered up without being used.
const int kMaxGpuIdleTimeMs = 40;
// Maximum amount of time we keep pinging the GPU waiting for the client to
// draw.
const int kMaxKeepAliveTimeMs = 200;
#endif
#if defined(OS_WIN)
void TrimD3DResources() {
  // Graphics drivers periodically allocate internal memory buffers in
  // order to speed up subsequent rendering requests. These memory allocations
  // in general lead to increased memory usage by the overall system.
  // Calling Trim discards internal memory buffers allocated for the app,
  // reducing its memory footprint.
  // Calling the Trim method does not change the rendering state of the
  // graphics device and has no effect on rendering operations.
  // There is a brief performance hit when internal buffers are reallocated
  // during the first rendering operations after the Trim call, therefore
  // apps should only call Trim when going idle for a period of time or during
  // low memory conditions.
  Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
      gl::QueryD3D11DeviceObjectFromANGLE();
  if (d3d11_device) {
    Microsoft::WRL::ComPtr<IDXGIDevice3> dxgi_device;
    if (SUCCEEDED(d3d11_device.As(&dxgi_device))) {
      dxgi_device->Trim();
    }
  }
}
#endif

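// Writes the per-source allocation sizes from |allocation_sources| into
// |dict|, one integer entry per GpuPeakMemoryAllocationSource. Sources with
// no recorded allocation read as zero via flat_map::operator[], which is why
// |allocation_sources| is taken by non-const reference.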
void FormatAllocationSourcesForTracing(
    base::trace_event::TracedValue* dict,
    base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>&
        allocation_sources) {
  dict->SetInteger("UNKNOWN",
                   allocation_sources[GpuPeakMemoryAllocationSource::UNKNOWN]);
  dict->SetInteger(
      "COMMAND_BUFFER",
      allocation_sources[GpuPeakMemoryAllocationSource::COMMAND_BUFFER]);
  dict->SetInteger(
      "SHARED_CONTEXT_STATE",
      allocation_sources[GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE]);
  dict->SetInteger(
      "SHARED_IMAGE_STUB",
      allocation_sources[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB]);
  dict->SetInteger("SKIA",
                   allocation_sources[GpuPeakMemoryAllocationSource::SKIA]);
}

}  // namespace

GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor()
    : weak_factory_(this) {}

GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() = default;

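// Returns the per-source allocations recorded at the peak for |sequence_num|
// and writes the total peak into |out_peak_memory|. An unknown sequence
// yields an empty map and a peak of zero.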
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
    uint32_t sequence_num,
    uint64_t* out_peak_memory) {
  auto sequence = sequence_trackers_.find(sequence_num);
  base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> allocation_per_source;
  *out_peak_memory = 0u;
  if (sequence != sequence_trackers_.end()) {
    *out_peak_memory = sequence->second.total_memory_;
    allocation_per_source = sequence->second.peak_memory_per_source_;
  }
  return allocation_per_source;
}

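// Begins tracking |sequence_num|, seeding the tracker with the memory
// currently in use so the reported peak can never fall below the usage at
// the time tracking started.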
void GpuChannelManager::GpuPeakMemoryMonitor::StartGpuMemoryTracking(
    uint32_t sequence_num) {
  sequence_trackers_.emplace(
      sequence_num,
      SequenceTracker(current_memory_, current_memory_per_source_));
  TRACE_EVENT_ASYNC_BEGIN2("gpu", "PeakMemoryTracking", sequence_num, "start",
                           current_memory_, "start_sources",
                           StartTrackingTracedValue());
}

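// Ends tracking for |sequence_num|, emitting the recorded peak to the trace
// before discarding the tracker. Stopping an unknown sequence is a no-op.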
void GpuChannelManager::GpuPeakMemoryMonitor::StopGpuMemoryTracking(
    uint32_t sequence_num) {
  auto sequence = sequence_trackers_.find(sequence_num);
  if (sequence != sequence_trackers_.end()) {
    TRACE_EVENT_ASYNC_END2("gpu", "PeakMemoryTracking", sequence_num, "peak",
                           sequence->second.total_memory_, "end_sources",
                           StopTrackingTracedValue(sequence->second));
    sequence_trackers_.erase(sequence);
  }
}

base::WeakPtr<MemoryTracker::Observer>
GpuChannelManager::GpuPeakMemoryMonitor::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}

void GpuChannelManager::GpuPeakMemoryMonitor::InvalidateWeakPtrs() {
  weak_factory_.InvalidateWeakPtrs();
}

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
    uint64_t current_memory,
    base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
        current_memory_per_source)
    : initial_memory_(current_memory),
      total_memory_(current_memory),
      initial_memory_per_source_(current_memory_per_source),
      peak_memory_per_source_(std::move(current_memory_per_source)) {}

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
    const SequenceTracker& other) = default;

GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::~SequenceTracker() =
    default;

std::unique_ptr<base::trace_event::TracedValue>
GpuChannelManager::GpuPeakMemoryMonitor::StartTrackingTracedValue() {
  auto dict = std::make_unique<base::trace_event::TracedValue>();
  FormatAllocationSourcesForTracing(dict.get(), current_memory_per_source_);
  return dict;
}

std::unique_ptr<base::trace_event::TracedValue>
GpuChannelManager::GpuPeakMemoryMonitor::StopTrackingTracedValue(
    SequenceTracker& sequence) {
  auto dict = std::make_unique<base::trace_event::TracedValue>();
  dict->BeginDictionary("source_totals");
  FormatAllocationSourcesForTracing(dict.get(),
                                    sequence.peak_memory_per_source_);
  dict->EndDictionary();
  dict->BeginDictionary("difference");
  int total_diff = sequence.total_memory_ - sequence.initial_memory_;
  dict->SetInteger("TOTAL", total_diff);
  dict->EndDictionary();
  dict->BeginDictionary("source_difference");

  for (const auto& it : sequence.peak_memory_per_source_) {
    int diff = (it.second - sequence.initial_memory_per_source_[it.first]);
    switch (it.first) {
      case GpuPeakMemoryAllocationSource::UNKNOWN:
        dict->SetInteger("UNKNOWN", diff);
        break;
      case GpuPeakMemoryAllocationSource::COMMAND_BUFFER:
        dict->SetInteger("COMMAND_BUFFER", diff);
        break;
      case GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE:
        dict->SetInteger("SHARED_CONTEXT_STATE", diff);
        break;
      case GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB:
        dict->SetInteger("SHARED_IMAGE_STUB", diff);
        break;
      case GpuPeakMemoryAllocationSource::SKIA:
        dict->SetInteger("SKIA", diff);
        break;
    }
  }

  dict->EndDictionary();
  return dict;
}

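// MemoryTracker::Observer implementation. Note that |diff| is computed with
// unsigned arithmetic: when |new_size| < |old_size| the subtraction wraps,
// and adding the wrapped value to the running totals wraps back, yielding
// the correct decrease.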
void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
    CommandBufferId id,
    uint64_t old_size,
    uint64_t new_size,
    GpuPeakMemoryAllocationSource source) {
  uint64_t diff = new_size - old_size;
  current_memory_ += diff;
  current_memory_per_source_[source] += diff;
  if (old_size < new_size) {
    // When memory has increased, iterate over the sequences to update their
    // peak.
    // TODO(jonross): This should be fine if we typically have 1-2 sequences.
    // However if that grows we may end up iterating many times as memory
    // approaches peak. If that is the case we should track a
    // |peak_since_last_sequence_update_| on the memory changes. Then only
    // update the sequences when a new one is added, or the peak is requested.
    for (auto& sequence : sequence_trackers_) {
      if (current_memory_ > sequence.second.total_memory_) {
        sequence.second.total_memory_ = current_memory_;
        for (auto& tracked_sequence : sequence_trackers_) {
          TRACE_EVENT_ASYNC_STEP_INTO1("gpu", "PeakMemoryTracking",
                                       tracked_sequence.first, "Peak", "peak",
                                       current_memory_);
        }
        for (auto& source_entry : current_memory_per_source_) {
          sequence.second.peak_memory_per_source_[source_entry.first] =
              source_entry.second;
        }
      }
    }
  }
}

GpuChannelManager::GpuChannelManager(
    const GpuPreferences& gpu_preferences,
    GpuChannelManagerDelegate* delegate,
    GpuWatchdogThread* watchdog,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
    Scheduler* scheduler,
    SyncPointManager* sync_point_manager,
    SharedImageManager* shared_image_manager,
    GpuMemoryBufferFactory* gpu_memory_buffer_factory,
    const GpuFeatureInfo& gpu_feature_info,
    GpuProcessActivityFlags activity_flags,
    scoped_refptr<gl::GLSurface> default_offscreen_surface,
    ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
    viz::VulkanContextProvider* vulkan_context_provider,
    viz::MetalContextProvider* metal_context_provider,
    viz::DawnContextProvider* dawn_context_provider)
    : task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      gpu_preferences_(gpu_preferences),
      gpu_driver_bug_workarounds_(
          gpu_feature_info.enabled_gpu_driver_bug_workarounds),
      delegate_(delegate),
      watchdog_(watchdog),
      share_group_(new gl::GLShareGroup()),
      mailbox_manager_(gles2::CreateMailboxManager(gpu_preferences)),
      scheduler_(scheduler),
      sync_point_manager_(sync_point_manager),
      shared_image_manager_(shared_image_manager),
      shader_translator_cache_(gpu_preferences_),
      default_offscreen_surface_(std::move(default_offscreen_surface)),
      gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
      gpu_feature_info_(gpu_feature_info),
      discardable_manager_(gpu_preferences_),
      passthrough_discardable_manager_(gpu_preferences_),
      image_decode_accelerator_worker_(image_decode_accelerator_worker),
      activity_flags_(std::move(activity_flags)),
      memory_pressure_listener_(
          base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
                              base::Unretained(this))),
      vulkan_context_provider_(vulkan_context_provider),
      metal_context_provider_(metal_context_provider),
      dawn_context_provider_(dawn_context_provider) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(io_task_runner);
  DCHECK(scheduler);

  const bool enable_gr_shader_cache =
      (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
       gpu::kGpuFeatureStatusEnabled) ||
      features::IsUsingSkiaRenderer();
  const bool disable_disk_cache =
      gpu_preferences_.disable_gpu_shader_disk_cache;
  if (enable_gr_shader_cache && !disable_disk_cache)
    gr_shader_cache_.emplace(gpu_preferences.gpu_program_cache_size, this);
}

GpuChannelManager::~GpuChannelManager() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Clear |gpu_channels_| first to prevent reentrancy problems from GpuChannel
  // destructor.
  auto gpu_channels = std::move(gpu_channels_);
  gpu_channels_.clear();
  gpu_channels.clear();

  // Invalidate here as the |shared_context_state_| attempts to call back to
  // |this| in the middle of the deletion.
  peak_memory_monitor_.InvalidateWeakPtrs();

  // Try to make the context current so that GPU resources can be destroyed
  // correctly.
  if (shared_context_state_)
    shared_context_state_->MakeCurrent(nullptr);

  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = nullptr;
  }
}

gles2::Outputter* GpuChannelManager::outputter() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (!outputter_)
    outputter_.reset(new gles2::TraceOutputter("GpuChannelManager Trace"));
  return outputter_.get();
}

gles2::ProgramCache* GpuChannelManager::program_cache() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (!program_cache_.get()) {
    const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
    bool disable_disk_cache =
        gpu_preferences_.disable_gpu_shader_disk_cache ||
        workarounds.disable_program_disk_cache;

    // Use the EGL blob cache extension for the passthrough decoder.
    if (gpu_preferences_.use_passthrough_cmd_decoder &&
        gles2::PassthroughCommandDecoderSupported()) {
      program_cache_.reset(new gles2::PassthroughProgramCache(
          gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
    } else {
      program_cache_.reset(new gles2::MemoryProgramCache(
          gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
          workarounds.disable_program_caching_for_transform_feedback,
          &activity_flags_));
    }
  }
  return program_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  auto it = gpu_channels_.find(client_id);
  if (it == gpu_channels_.end())
    return;

  delegate_->DidDestroyChannel(client_id);

  // Erase the |gpu_channels_| entry before destroying the GpuChannel object to
  // avoid reentrancy problems from the GpuChannel destructor.
  std::unique_ptr<GpuChannel> channel = std::move(it->second);
  gpu_channels_.erase(it);
  channel.reset();

  if (gpu_channels_.empty()) {
    delegate_->DidDestroyAllChannels();
  }
}

GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  const auto& it = gpu_channels_.find(client_id);
  return it != gpu_channels_.end() ? it->second.get() : nullptr;
}

void GpuChannelManager::set_share_group(gl::GLShareGroup* share_group) {
  share_group_ = share_group;
}

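// Creates and registers a GpuChannel for |client_id|; a channel already
// registered under the same id is replaced by the map assignment below.
// Returns null if GpuChannel::Create() fails.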
GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
                                                uint64_t client_tracing_id,
                                                bool is_gpu_host,
                                                bool cache_shaders_on_disk) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (gr_shader_cache_ && cache_shaders_on_disk)
    gr_shader_cache_->CacheClientIdOnDisk(client_id);

  std::unique_ptr<GpuChannel> gpu_channel = GpuChannel::Create(
      this, scheduler_, sync_point_manager_, share_group_, task_runner_,
      io_task_runner_, client_id, client_tracing_id, is_gpu_host,
      image_decode_accelerator_worker_);

  if (!gpu_channel)
    return nullptr;

  GpuChannel* gpu_channel_ptr = gpu_channel.get();
  gpu_channels_[client_id] = std::move(gpu_channel);
  return gpu_channel_ptr;
}

void GpuChannelManager::InternalDestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}

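// Defers buffer destruction until |sync_token| is released; if the token is
// invalid or already released, the buffer is destroyed immediately.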
void GpuChannelManager::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
                                               int client_id,
                                               const SyncToken& sync_token) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (!sync_point_manager_->WaitOutOfOrder(
          sync_token,
          base::BindOnce(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
                         base::Unretained(this), id, client_id))) {
    // No sync token or invalid sync token, destroy immediately.
    InternalDestroyGpuMemoryBuffer(id, client_id);
  }
}

void GpuChannelManager::PopulateShaderCache(int32_t client_id,
                                            const std::string& key,
                                            const std::string& program) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (client_id == kGrShaderCacheClientId) {
    if (gr_shader_cache_)
      gr_shader_cache_->PopulateCache(key, program);
    return;
  }

  if (program_cache())
    program_cache()->LoadProgram(key, program);
}

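// Marks every context in every channel lost and swaps in a fresh share
// group. Actual channel teardown is deferred to a posted task so this can be
// invoked safely from code that is still running inside a channel.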
void GpuChannelManager::LoseAllContexts() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
  for (auto& kv : gpu_channels_) {
    kv.second->MarkAllContextsLost();
  }
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&GpuChannelManager::DestroyAllChannels,
                                        weak_factory_.GetWeakPtr()));
  if (shared_context_state_) {
    gr_cache_controller_.reset();
    shared_context_state_->MarkContextLost();
    shared_context_state_.reset();
  }
}

void GpuChannelManager::DestroyAllChannels() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Clear |gpu_channels_| first to prevent reentrancy problems from GpuChannel
  // destructor.
  auto gpu_channels = std::move(gpu_channels_);
  gpu_channels_.clear();
  gpu_channels.clear();
}

void GpuChannelManager::GetVideoMemoryUsageStats(
    VideoMemoryUsageStats* video_memory_usage_stats) const {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  uint64_t total_size = 0;
  for (const auto& entry : gpu_channels_) {
    const GpuChannel* channel = entry.second.get();
    if (!channel->IsConnected())
      continue;
    uint64_t size = channel->GetMemoryUsage();
    total_size += size;
    video_memory_usage_stats->process_map[channel->GetClientPID()]
        .video_memory += size;
  }

  if (shared_context_state_ && !shared_context_state_->context_lost())
    total_size += shared_context_state_->GetMemoryUsage();

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[base::GetCurrentProcId()].video_memory =
      total_size;
  video_memory_usage_stats->process_map[base::GetCurrentProcId()]
      .has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = total_size;
}

void GpuChannelManager::StartPeakMemoryMonitor(uint32_t sequence_num) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  peak_memory_monitor_.StartGpuMemoryTracking(sequence_num);
}

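// Reports the peak recorded for |sequence_num| and then stops tracking it,
// so each sequence can be queried exactly once.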
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
GpuChannelManager::GetPeakMemoryUsage(uint32_t sequence_num,
                                      uint64_t* out_peak_memory) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  auto allocation_per_source =
      peak_memory_monitor_.GetPeakMemoryUsage(sequence_num, out_peak_memory);
  peak_memory_monitor_.StopGpuMemoryTracking(sequence_num);
  return allocation_per_source;
}

#if defined(OS_ANDROID)
void GpuChannelManager::DidAccessGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  last_gpu_access_time_ = base::TimeTicks::Now();
}

void GpuChannelManager::WakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  begin_wake_up_time_ = base::TimeTicks::Now();
  ScheduleWakeUpGpu();
}

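// Re-posts itself every kMaxGpuIdleTimeMs to keep issuing GPU work while a
// client draw is pending. The loop exits early if the GPU was used recently
// (the client is drawing on its own) or once kMaxKeepAliveTimeMs has elapsed
// since WakeUpGpu() was called.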
void GpuChannelManager::ScheduleWakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  base::TimeTicks now = base::TimeTicks::Now();
  TRACE_EVENT2("gpu", "GpuChannelManager::ScheduleWakeUp", "idle_time",
               (now - last_gpu_access_time_).InMilliseconds(),
               "keep_awake_time", (now - begin_wake_up_time_).InMilliseconds());
  if (now - last_gpu_access_time_ <
      base::TimeDelta::FromMilliseconds(kMaxGpuIdleTimeMs))
    return;
  if (now - begin_wake_up_time_ >
      base::TimeDelta::FromMilliseconds(kMaxKeepAliveTimeMs))
    return;

  DoWakeUpGpu();

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&GpuChannelManager::ScheduleWakeUpGpu,
                     weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kMaxGpuIdleTimeMs));
}

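// Issues a trivial piece of GL work (glFinish) on the first stub found with
// a GL context, which is enough to keep the GPU powered up. Stubs whose
// decoders run without GL (Vulkan, Dawn, etc.) are skipped.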
void GpuChannelManager::DoWakeUpGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  const CommandBufferStub* stub = nullptr;
  for (const auto& kv : gpu_channels_) {
    const GpuChannel* channel = kv.second.get();
    stub = channel->GetOneStub();
    if (stub) {
      DCHECK(stub->decoder_context());
      // With Vulkan, Dawn, etc, RasterDecoders don't use GL.
      if (stub->decoder_context()->GetGLContext())
        break;
    }
  }
  if (!stub || !stub->decoder_context()->MakeCurrent())
    return;
  glFinish();
  DidAccessGpu();
}

void GpuChannelManager::OnBackgroundCleanup() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Delete all the GL contexts when the channel does not use WebGL and Chrome
  // goes to background on low-end devices.
  std::vector<int> channels_to_clear;
  for (auto& kv : gpu_channels_) {
    // TODO(ssid): WebGL context loss event notification must be sent before
    // clearing WebGL contexts (crbug.com/725306).
    if (kv.second->HasActiveWebGLContext())
      continue;
    channels_to_clear.push_back(kv.first);
    kv.second->MarkAllContextsLost();
  }
  for (int channel : channels_to_clear)
    RemoveChannel(channel);

  if (program_cache_)
    program_cache_->Trim(0u);

  if (shared_context_state_) {
    gr_cache_controller_.reset();
    shared_context_state_->MarkContextLost();
    shared_context_state_.reset();
  }

  SkGraphics::PurgeAllCaches();
}
#endif

void GpuChannelManager::OnApplicationBackgrounded() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (shared_context_state_) {
    shared_context_state_->PurgeMemory(
        base::MemoryPressureListener::MemoryPressureLevel::
            MEMORY_PRESSURE_LEVEL_CRITICAL);
  }

  // Release all Skia caches when the application is backgrounded.
  SkGraphics::PurgeAllCaches();
}

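// Fans memory pressure out to every cache owned by the GPU process. On
// Windows this additionally asks DXGI to trim driver-internal allocations
// (see TrimD3DResources above).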
void GpuChannelManager::HandleMemoryPressure(
    base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (program_cache_)
    program_cache_->HandleMemoryPressure(memory_pressure_level);
  discardable_manager_.HandleMemoryPressure(memory_pressure_level);
  passthrough_discardable_manager_.HandleMemoryPressure(memory_pressure_level);
  if (shared_context_state_)
    shared_context_state_->PurgeMemory(memory_pressure_level);
  if (gr_shader_cache_)
    gr_shader_cache_->PurgeMemory(memory_pressure_level);
#if defined(OS_WIN)
  TrimD3DResources();
#endif
}

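// Lazily creates the SharedContextState used by SkiaRenderer and OOP-R. The
// cached instance is reused until its context is lost. On failure, |result|
// distinguishes fatal errors (context creation failed) from transient ones
// (MakeCurrent failed) so the caller can decide whether to retry.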
scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
    ContextResult* result) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (shared_context_state_ && !shared_context_state_->context_lost()) {
    *result = ContextResult::kSuccess;
    return shared_context_state_;
  }

  scoped_refptr<gl::GLSurface> surface = default_offscreen_surface();
  bool use_virtualized_gl_contexts = false;
#if defined(OS_MACOSX)
  // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
  // performance regressions when enabling FCM.
  // http://crbug.com/180463
  use_virtualized_gl_contexts = true;
#endif
  use_virtualized_gl_contexts |=
      gpu_driver_bug_workarounds_.use_virtualized_gl_contexts;
  // MailboxManagerSync synchronization correctness currently depends on having
  // only a single context. See crbug.com/510243 for details.
  use_virtualized_gl_contexts |= mailbox_manager_->UsesSync();

  const bool use_passthrough_decoder =
      gles2::PassthroughCommandDecoderSupported() &&
      gpu_preferences_.use_passthrough_cmd_decoder;
  scoped_refptr<gl::GLShareGroup> share_group;
  if (use_passthrough_decoder) {
    share_group = new gl::GLShareGroup();
    // Virtualized contexts don't work with the passthrough command decoder.
    // See https://crbug.com/914976
    use_virtualized_gl_contexts = false;
  } else {
    share_group = share_group_;
  }

  scoped_refptr<gl::GLContext> context =
      use_virtualized_gl_contexts ? share_group->shared_context() : nullptr;
  if (context && (!context->MakeCurrent(surface.get()) ||
                  context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
    context = nullptr;
  }
  if (!context) {
    gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs(
        ContextCreationAttribs(), use_passthrough_decoder);

    // Only skip validation if the GLContext will be used exclusively by the
    // SharedContextState.
    attribs.can_skip_validation = !use_virtualized_gl_contexts;
    context =
        gl::init::CreateGLContext(share_group.get(), surface.get(), attribs);
    if (!context) {
      // TODO(piman): This might not be fatal, we could recurse into
      // CreateGLContext to get more info, though it should be exceedingly
      // rare and may not be recoverable anyway.
      LOG(ERROR) << "ContextResult::kFatalFailure: "
                    "Failed to create shared context for virtualization.";
      *result = ContextResult::kFatalFailure;
      return nullptr;
    }
    // Ensure that context creation did not lose track of the intended share
    // group.
    DCHECK(context->share_group() == share_group.get());
    gpu_feature_info_.ApplyToGLContext(context.get());

    if (use_virtualized_gl_contexts)
      share_group->SetSharedContext(context.get());
  }

  // This should be either:
  // (1) a non-virtual GL context, or
  // (2) a mock/stub context.
  DCHECK(context->GetHandle() ||
         gl::GetGLImplementation() == gl::kGLImplementationMockGL ||
         gl::GetGLImplementation() == gl::kGLImplementationStubGL);

  if (!context->MakeCurrent(surface.get())) {
    LOG(ERROR)
        << "ContextResult::kTransientFailure, failed to make context current";
    *result = ContextResult::kTransientFailure;
    return nullptr;
  }

  // TODO(penghuang): https://crbug.com/899735 Handle device lost for Vulkan.
  shared_context_state_ = base::MakeRefCounted<SharedContextState>(
      std::move(share_group), std::move(surface), std::move(context),
      use_virtualized_gl_contexts,
      base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
                     /*synthetic_loss=*/false),
      gpu_preferences_.gr_context_type, vulkan_context_provider_,
      metal_context_provider_, dawn_context_provider_,
      peak_memory_monitor_.GetWeakPtr());

  // OOP-R needs a GrContext for raster tiles.
  bool need_gr_context =
      gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
      gpu::kGpuFeatureStatusEnabled;

  // SkiaRenderer needs a GrContext to composite the output surface.
  need_gr_context |= features::IsUsingSkiaRenderer();

  if (need_gr_context) {
    if (gpu_preferences_.gr_context_type == gpu::GrContextType::kGL) {
      auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
          gpu_driver_bug_workarounds(), gpu_feature_info());
      if (!shared_context_state_->InitializeGL(gpu_preferences_,
                                               feature_info.get())) {
        shared_context_state_ = nullptr;
        LOG(ERROR) << "ContextResult::kFatalFailure: Failed to initialize GL "
                      "for SharedContextState";
        *result = ContextResult::kFatalFailure;
        return nullptr;
      }
    }
    shared_context_state_->InitializeGrContext(
        gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
        &activity_flags_, watchdog_);
  }

  gr_cache_controller_.emplace(shared_context_state_.get(), task_runner_);

  *result = ContextResult::kSuccess;
  return shared_context_state_;
}

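// Invoked through the callback given to SharedContextState. Synthetic losses
// are ignored here; for a real loss, all other contexts are dropped when the
// platform or context virtualization requires it, and on drivers known to
// recover poorly the delegate may exit so a fresh GPU process can launch.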
void GpuChannelManager::OnContextLost(bool synthetic_loss) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (synthetic_loss)
    return;

  // Lose all other contexts.
  if (gl::GLContext::LosesAllContextsOnContextLost() ||
      (shared_context_state_ &&
       shared_context_state_->use_virtualized_gl_contexts())) {
    delegate_->LoseAllContexts();
  }

  // Work around issues with recovery by allowing a new GPU process to launch.
  if (gpu_driver_bug_workarounds_.exit_on_context_lost)
    delegate_->MaybeExitOnContextLost();
}

void GpuChannelManager::ScheduleGrContextCleanup() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (gr_cache_controller_)
    gr_cache_controller_->ScheduleGrContextCleanup();
}

void GpuChannelManager::StoreShader(const std::string& key,
                                    const std::string& shader) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  delegate_->StoreShaderToDisk(kGrShaderCacheClientId, key, shader);
}

void GpuChannelManager::SetImageDecodeAcceleratorWorkerForTesting(
    ImageDecodeAcceleratorWorker* worker) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  DCHECK(gpu_channels_.empty());
  image_decode_accelerator_worker_ = worker;
}

}  // namespace gpu