1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "components/viz/host/host_gpu_memory_buffer_manager.h"
6 
7 #include <utility>
8 
9 #include "base/bind.h"
10 #include "base/logging.h"
11 #include "base/threading/thread_restrictions.h"
12 #include "base/trace_event/memory_dump_manager.h"
13 #include "base/trace_event/process_memory_dump.h"
14 #include "gpu/ipc/common/gpu_memory_buffer_impl.h"
15 #include "gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h"
16 #include "gpu/ipc/common/gpu_memory_buffer_support.h"
17 #include "services/viz/privileged/mojom/gl/gpu_service.mojom.h"
18 #include "ui/base/ui_base_features.h"
19 #include "ui/gfx/buffer_format_util.h"
20 #include "ui/gfx/buffer_usage_util.h"
21 
22 namespace viz {
23 
24 namespace {
25 
OnGpuMemoryBufferDestroyed(scoped_refptr<base::SingleThreadTaskRunner> task_runner,gpu::GpuMemoryBufferImpl::DestructionCallback callback,const gpu::SyncToken & sync_token)26 void OnGpuMemoryBufferDestroyed(
27     scoped_refptr<base::SingleThreadTaskRunner> task_runner,
28     gpu::GpuMemoryBufferImpl::DestructionCallback callback,
29     const gpu::SyncToken& sync_token) {
30   task_runner->PostTask(FROM_HERE,
31                         base::BindOnce(std::move(callback), sync_token));
32 }
33 
34 }  // namespace
35 
// Out-of-line defaulted special members for PendingBufferInfo (declared in
// the header). Only default-construction, move-construction and destruction
// are defined here; the struct holds a base::OnceCallback, which is move-only.
HostGpuMemoryBufferManager::PendingBufferInfo::PendingBufferInfo() = default;
HostGpuMemoryBufferManager::PendingBufferInfo::PendingBufferInfo(
    PendingBufferInfo&&) = default;
HostGpuMemoryBufferManager::PendingBufferInfo::~PendingBufferInfo() = default;
40 
HostGpuMemoryBufferManager(GpuServiceProvider gpu_service_provider,int client_id,std::unique_ptr<gpu::GpuMemoryBufferSupport> gpu_memory_buffer_support,scoped_refptr<base::SingleThreadTaskRunner> task_runner)41 HostGpuMemoryBufferManager::HostGpuMemoryBufferManager(
42     GpuServiceProvider gpu_service_provider,
43     int client_id,
44     std::unique_ptr<gpu::GpuMemoryBufferSupport> gpu_memory_buffer_support,
45     scoped_refptr<base::SingleThreadTaskRunner> task_runner)
46     : gpu_service_provider_(gpu_service_provider),
47       client_id_(client_id),
48       gpu_memory_buffer_support_(std::move(gpu_memory_buffer_support)),
49       task_runner_(std::move(task_runner)) {
50   bool should_get_native_configs = true;
51 #if defined(USE_X11)
52   should_get_native_configs = features::IsUsingOzonePlatform();
53 #endif
54   if (should_get_native_configs) {
55     native_configurations_ = gpu::GetNativeGpuMemoryBufferConfigurations(
56         gpu_memory_buffer_support_.get());
57     native_configurations_initialized_.Signal();
58   }
59   base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
60       this, "HostGpuMemoryBufferManager", task_runner_);
61 }
62 
HostGpuMemoryBufferManager::~HostGpuMemoryBufferManager() {
  // Must run on |task_runner_|'s thread — the same thread the dump provider
  // was registered with in the constructor.
  DCHECK(task_runner_->BelongsToCurrentThread());
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}
68 
DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,int client_id,const gpu::SyncToken & sync_token)69 void HostGpuMemoryBufferManager::DestroyGpuMemoryBuffer(
70     gfx::GpuMemoryBufferId id,
71     int client_id,
72     const gpu::SyncToken& sync_token) {
73   DCHECK(task_runner_->BelongsToCurrentThread());
74   auto client_iter = allocated_buffers_.find(client_id);
75   if (client_iter == allocated_buffers_.end())
76     return;
77   auto& buffers = client_iter->second;
78   auto buffer_iter = buffers.find(id);
79   if (buffer_iter == buffers.end())
80     return;
81   DCHECK_NE(gfx::EMPTY_BUFFER, buffer_iter->second.type());
82   if (buffer_iter->second.type() != gfx::SHARED_MEMORY_BUFFER) {
83     auto* gpu_service = GetGpuService();
84     DCHECK(gpu_service);
85     gpu_service->DestroyGpuMemoryBuffer(id, client_id, sync_token);
86   }
87   buffers.erase(buffer_iter);
88 }
89 
DestroyAllGpuMemoryBufferForClient(int client_id)90 void HostGpuMemoryBufferManager::DestroyAllGpuMemoryBufferForClient(
91     int client_id) {
92   DCHECK(task_runner_->BelongsToCurrentThread());
93   auto client_iter = allocated_buffers_.find(client_id);
94   if (client_iter != allocated_buffers_.end()) {
95     auto& buffers = client_iter->second;
96     for (const auto& pair : buffers) {
97       DCHECK_NE(gfx::EMPTY_BUFFER, pair.second.type());
98       if (pair.second.type() != gfx::SHARED_MEMORY_BUFFER) {
99         auto* gpu_service = GetGpuService();
100         DCHECK(gpu_service);
101         gpu_service->DestroyGpuMemoryBuffer(pair.first, client_id,
102                                             gpu::SyncToken());
103       }
104     }
105     allocated_buffers_.erase(client_iter);
106   }
107   auto pending_client_iter = pending_buffers_.find(client_id);
108   if (pending_client_iter != pending_buffers_.end()) {
109     auto& buffers = pending_client_iter->second;
110     for (auto& pair : buffers)
111       std::move(pair.second.callback).Run(gfx::GpuMemoryBufferHandle());
112     pending_buffers_.erase(pending_client_iter);
113   }
114 }
115 
// Allocates a buffer for |client_id| and replies through |callback|. (format,
// usage) pairs in the native configuration set are requested asynchronously
// from the GPU service; everything else falls back to a shared-memory buffer
// created synchronously here. On failure |callback| receives a null handle.
void HostGpuMemoryBufferManager::AllocateGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id,
    const gfx::Size& size,
    gfx::BufferFormat format,
    gfx::BufferUsage usage,
    gpu::SurfaceHandle surface_handle,
    base::OnceCallback<void(gfx::GpuMemoryBufferHandle)> callback) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  // Lazily bind the weak pointer here so it is bound to this (task runner)
  // thread, which is where it will be dereferenced.
  if (!weak_ptr_)
    weak_ptr_ = weak_factory_.GetWeakPtr();
  if (gpu_memory_buffer_support_->GetNativeGpuMemoryBufferType() !=
          gfx::EMPTY_BUFFER &&
      IsNativeGpuMemoryBufferConfiguration(format, usage)) {
    if (auto* gpu_service = GetGpuService()) {
      // Record the request so it can be retried if the GPU service dies
      // (OnConnectionError()) or cancelled when the client goes away
      // (DestroyAllGpuMemoryBufferForClient()).
      PendingBufferInfo buffer_info;
      buffer_info.size = size;
      buffer_info.format = format;
      buffer_info.usage = usage;
      buffer_info.surface_handle = surface_handle;
      buffer_info.callback = std::move(callback);
      pending_buffers_[client_id].insert(
          std::make_pair(id, std::move(buffer_info)));
      // |gpu_service_version_| lets the reply be ignored if it arrives from a
      // GPU service instance that has since been restarted.
      gpu_service->CreateGpuMemoryBuffer(
          id, size, format, usage, client_id, surface_handle,
          base::BindOnce(
              &HostGpuMemoryBufferManager::OnGpuMemoryBufferAllocated,
              weak_ptr_, gpu_service_version_, client_id, id));
    } else {
      // GPU service failed to start. Run the callback with null handle.
      std::move(callback).Run(gfx::GpuMemoryBufferHandle());
    }
    return;
  }

  gfx::GpuMemoryBufferHandle buffer_handle;
  // The requests are coming in from untrusted clients. So verify that it is
  // possible to allocate shared memory buffer first.
  if (gpu::GpuMemoryBufferImplSharedMemory::IsUsageSupported(usage) &&
      gpu::GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(size,
                                                                 format)) {
    buffer_handle = gpu::GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(
        id, size, format, usage);
    DCHECK_EQ(gfx::SHARED_MEMORY_BUFFER, buffer_handle.type);
    AllocatedBufferInfo buffer_info(buffer_handle, size, format);
    allocated_buffers_[client_id].insert(
        std::make_pair(buffer_handle.id, buffer_info));
  }

  // Post (rather than run) the callback so the reply is always asynchronous;
  // |buffer_handle| stays null when shared-memory allocation was not possible.
  task_runner_->PostTask(
      FROM_HERE, base::BindOnce(std::move(callback), std::move(buffer_handle)));
}
168 
IsNativeGpuMemoryBufferConfiguration(gfx::BufferFormat format,gfx::BufferUsage usage) const169 bool HostGpuMemoryBufferManager::IsNativeGpuMemoryBufferConfiguration(
170     gfx::BufferFormat format,
171     gfx::BufferUsage usage) const {
172   DCHECK(task_runner_->BelongsToCurrentThread());
173   {
174     base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
175     native_configurations_initialized_.Wait();
176   }
177   return native_configurations_.find(gfx::BufferUsageAndFormat(
178              usage, format)) != native_configurations_.end();
179 }
180 
// Synchronously allocates a buffer owned by this process (|client_id_|).
// Must be called off |task_runner_|'s thread: the allocation is posted there
// and this thread blocks on a WaitableEvent until the reply lands. Returns
// null if allocation failed.
std::unique_ptr<gfx::GpuMemoryBuffer>
HostGpuMemoryBufferManager::CreateGpuMemoryBuffer(
    const gfx::Size& size,
    gfx::BufferFormat format,
    gfx::BufferUsage usage,
    gpu::SurfaceHandle surface_handle) {
  gfx::GpuMemoryBufferId id(next_gpu_memory_id_++);
  gfx::GpuMemoryBufferHandle handle;
  base::WaitableEvent wait_event(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  // Blocking |task_runner_|'s own thread here would deadlock, since that is
  // the thread that fulfills the request.
  DCHECK(!task_runner_->BelongsToCurrentThread());
  auto reply_callback = base::BindOnce(
      [](gfx::GpuMemoryBufferHandle* handle, base::WaitableEvent* wait_event,
         gfx::GpuMemoryBufferHandle allocated_buffer_handle) {
        *handle = std::move(allocated_buffer_handle);
        wait_event->Signal();
      },
      &handle, &wait_event);
  // We block with a WaitableEvent until the callback is run. So using
  // base::Unretained() is safe here.
  auto allocate_callback =
      base::BindOnce(&HostGpuMemoryBufferManager::AllocateGpuMemoryBuffer,
                     base::Unretained(this), id, client_id_, size, format,
                     usage, surface_handle, std::move(reply_callback));
  task_runner_->PostTask(FROM_HERE, std::move(allocate_callback));
  base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope
      allow_base_sync_primitives;
  wait_event.Wait();
  if (handle.is_null())
    return nullptr;
  // The destruction callback can be called on any thread. So use an
  // intermediate callback here as the destruction callback, which bounces off
  // onto the |task_runner_| thread to do the real work.
  return gpu_memory_buffer_support_->CreateGpuMemoryBufferImplFromHandle(
      std::move(handle), size, format, usage,
      base::BindOnce(
          &OnGpuMemoryBufferDestroyed, task_runner_,
          base::BindOnce(&HostGpuMemoryBufferManager::DestroyGpuMemoryBuffer,
                         weak_ptr_, id, client_id_)));
}
222 
SetDestructionSyncToken(gfx::GpuMemoryBuffer * buffer,const gpu::SyncToken & sync_token)223 void HostGpuMemoryBufferManager::SetDestructionSyncToken(
224     gfx::GpuMemoryBuffer* buffer,
225     const gpu::SyncToken& sync_token) {
226   static_cast<gpu::GpuMemoryBufferImpl*>(buffer)->set_destruction_sync_token(
227       sync_token);
228 }
229 
OnMemoryDump(const base::trace_event::MemoryDumpArgs & args,base::trace_event::ProcessMemoryDump * pmd)230 bool HostGpuMemoryBufferManager::OnMemoryDump(
231     const base::trace_event::MemoryDumpArgs& args,
232     base::trace_event::ProcessMemoryDump* pmd) {
233   DCHECK(task_runner_->BelongsToCurrentThread());
234   for (const auto& pair : allocated_buffers_) {
235     int client_id = pair.first;
236     uint64_t client_tracing_process_id = ClientIdToTracingId(client_id);
237     for (const auto& buffer_pair : pair.second) {
238       auto& buffer_info = buffer_pair.second;
239       if (!buffer_info.OnMemoryDump(pmd, client_id, client_tracing_process_id))
240         return false;
241     }
242   }
243   return true;
244 }
245 
SetNativeConfigurations(gpu::GpuMemoryBufferConfigurationSet native_configurations)246 void HostGpuMemoryBufferManager::SetNativeConfigurations(
247     gpu::GpuMemoryBufferConfigurationSet native_configurations) {
248   // Must not be done on the task runner thread to avoid deadlock.
249   DCHECK(!task_runner_->BelongsToCurrentThread());
250   if (native_configurations_initialized_.IsSignaled()) {
251     // The configurations are set on GPU initialization and should not change.
252     DCHECK(native_configurations_ == native_configurations);
253   } else {
254     native_configurations_ = native_configurations;
255     native_configurations_initialized_.Signal();
256   }
257 }
258 
GetGpuService()259 mojom::GpuService* HostGpuMemoryBufferManager::GetGpuService() {
260   DCHECK(task_runner_->BelongsToCurrentThread());
261 
262   if (gpu_service_)
263     return gpu_service_;
264 
265   gpu_service_ = gpu_service_provider_.Run(base::BindOnce(
266       &HostGpuMemoryBufferManager::OnConnectionError, weak_ptr_));
267   return gpu_service_;
268 }
269 
OnConnectionError()270 void HostGpuMemoryBufferManager::OnConnectionError() {
271   DCHECK(task_runner_->BelongsToCurrentThread());
272 
273   gpu_service_ = nullptr;
274   gpu_service_version_++;
275 
276   // Drop allocated buffers.
277   allocated_buffers_.clear();
278 
279   // Retry requesting pending buffer allocations.
280   auto pending_buffers = std::move(pending_buffers_);
281   pending_buffers_.clear();
282   for (auto& client_pair : pending_buffers) {
283     for (auto& buffer_pair : client_pair.second) {
284       auto& buffer = buffer_pair.second;
285       LOG(WARNING) << "Retrying allocation of GpuMemoryBuffer with id = "
286                    << buffer_pair.first.id
287                    << ", client_id = " << client_pair.first
288                    << ", size = " << buffer.size.ToString()
289                    << ", format = " << gfx::BufferFormatToString(buffer.format)
290                    << ", usage = " << gfx::BufferUsageToString(buffer.usage)
291                    << ", surface_handle = "
292                    << buffer.surface_handle
293                    << " due to connection error";
294       AllocateGpuMemoryBuffer(
295           buffer_pair.first, client_pair.first, buffer.size, buffer.format,
296           buffer.usage, buffer.surface_handle, std::move(buffer.callback));
297     }
298   }
299 }
300 
ClientIdToTracingId(int client_id) const301 uint64_t HostGpuMemoryBufferManager::ClientIdToTracingId(int client_id) const {
302   if (client_id == client_id_) {
303     return base::trace_event::MemoryDumpManager::GetInstance()
304         ->GetTracingProcessId();
305   }
306   // TODO(sad|ssid): Find a better way once https://crbug.com/661257 is
307   // resolved.  The hash value is incremented so that the tracing id is never
308   // equal to MemoryDumpManager::kInvalidTracingProcessId.
309   return static_cast<uint64_t>(base::PersistentHash(
310              base::as_bytes(base::make_span(&client_id, 1)))) +
311          1;
312 }
313 
// Reply from the GPU service for a native-buffer request made in
// AllocateGpuMemoryBuffer(). Moves the buffer from |pending_buffers_| to
// |allocated_buffers_| (or cleans up on the race paths below) and runs the
// client's callback with |handle| (null on allocation failure).
void HostGpuMemoryBufferManager::OnGpuMemoryBufferAllocated(
    int gpu_service_version,
    int client_id,
    gfx::GpuMemoryBufferId id,
    gfx::GpuMemoryBufferHandle handle) {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // If the buffer is allocated by an old gpu service, we can safely ignore it
  // as we have already requested a new one on the new gpu service in
  // OnConnectionError().
  if (gpu_service_version_ != gpu_service_version)
    return;

  auto client_iter = pending_buffers_.find(client_id);
  if (client_iter == pending_buffers_.end()) {
    // The client has been destroyed since the allocation request was made. The
    // callback is already called with null handle.
    if (!handle.is_null()) {
      // Release the now-orphaned buffer on the GPU service side.
      auto* gpu_service = GetGpuService();
      DCHECK(gpu_service);
      gpu_service->DestroyGpuMemoryBuffer(handle.id, client_id,
                                          gpu::SyncToken());
    }
    return;
  }

  auto buffer_iter = client_iter->second.find(id);
  DCHECK(buffer_iter != client_iter->second.end());
  PendingBufferInfo pending_buffer = std::move(buffer_iter->second);
  client_iter->second.erase(buffer_iter);

  if (!handle.is_null()) {
    DCHECK(handle.id == id);

    // Track the successful allocation for memory dumps and later destruction.
    AllocatedBufferInfo buffer_info(handle, pending_buffer.size,
                                    pending_buffer.format);
    allocated_buffers_[client_id].insert(std::make_pair(id, buffer_info));
  }
  std::move(pending_buffer.callback).Run(std::move(handle));
}
354 
355 }  // namespace viz
356