// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/external_vk_image_backing.h"

#include <utility>
#include <vector>

#include "base/stl_util.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_overlay_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "gpu/vulkan/vma_wrapper.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_util.h"
#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/buildflags.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/scoped_binders.h"

#if (defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)) && \
    BUILDFLAG(USE_DAWN)
#include "gpu/command_buffer/service/external_vk_image_dawn_representation.h"
#endif

#if defined(OS_FUCHSIA)
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif

#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581
#define GL_TEXTURE_TILING_EXT 0x9580
#define GL_TILING_TYPES_EXT 0x9583
#define GL_OPTIMAL_TILING_EXT 0x9584
#define GL_LINEAR_TILING_EXT 0x9585
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
#define GL_HANDLE_TYPE_OPAQUE_WIN32_EXT 0x9587
#define GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE 0x93AE
#define GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE 0x93AF

namespace gpu {

namespace {

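// Maps viz::ResourceFormat (rows must stay in enum order, which the
// static_assert below checks) to the GL format/type used for pixel upload and
// readback. GL_ZERO rows mark formats these copy paths do not support.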
static const struct {
  GLenum gl_format;
  GLenum gl_type;
  GLuint bytes_per_pixel;
} kFormatTable[] = {
    {GL_RGBA, GL_UNSIGNED_BYTE, 4},                // RGBA_8888
    {GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, 2},       // RGBA_4444
    {GL_BGRA, GL_UNSIGNED_BYTE, 4},                // BGRA_8888
    {GL_RED, GL_UNSIGNED_BYTE, 1},                 // ALPHA_8
    {GL_RED, GL_UNSIGNED_BYTE, 1},                 // LUMINANCE_8
    {GL_RGB, GL_UNSIGNED_SHORT_5_6_5, 2},          // RGB_565
    {GL_BGR, GL_UNSIGNED_SHORT_5_6_5, 2},          // BGR_565
    {GL_ZERO, GL_ZERO, 0},                         // ETC1
    {GL_RED, GL_UNSIGNED_BYTE, 1},                 // RED_8
    {GL_RG, GL_UNSIGNED_BYTE, 2},                  // RG_88
    {GL_RED, GL_HALF_FLOAT_OES, 2},                // LUMINANCE_F16
    {GL_RGBA, GL_HALF_FLOAT_OES, 8},               // RGBA_F16
    {GL_RED, GL_UNSIGNED_SHORT, 2},                // R16_EXT
    {GL_RGBA, GL_UNSIGNED_BYTE, 4},                // RGBX_8888
    {GL_BGRA, GL_UNSIGNED_BYTE, 4},                // BGRX_8888
    {GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, 4},  // RGBA_1010102
    {GL_BGRA, GL_UNSIGNED_INT_2_10_10_10_REV, 4},  // BGRA_1010102
    {GL_ZERO, GL_ZERO, 0},                         // YVU_420
    {GL_ZERO, GL_ZERO, 0},                         // YUV_420_BIPLANAR
    {GL_ZERO, GL_ZERO, 0},                         // P010
};
static_assert(base::size(kFormatTable) == (viz::RESOURCE_FORMAT_MAX + 1),
              "kFormatTable does not handle all cases.");

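// RAII helper that sets a glPixelStorei() parameter for its lifetime and
// restores the previous value on destruction.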
class ScopedPixelStore {
 public:
  ScopedPixelStore(gl::GLApi* api, GLenum name, GLint value)
      : api_(api), name_(name), value_(value) {
    api_->glGetIntegervFn(name_, &old_value_);
    if (value_ != old_value_)
      api->glPixelStoreiFn(name_, value_);
  }
  ~ScopedPixelStore() {
    if (value_ != old_value_)
      api_->glPixelStoreiFn(name_, old_value_);
  }

 private:
  gl::GLApi* const api_;
  const GLenum name_;
  const GLint value_;
  GLint old_value_;

  DISALLOW_COPY_AND_ASSIGN(ScopedPixelStore);
};

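// RAII wrapper for an EXT_memory_object memory object that is marked as a
// dedicated allocation.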
class ScopedDedicatedMemoryObject {
 public:
  explicit ScopedDedicatedMemoryObject(gl::GLApi* api) : api_(api) {
    api_->glCreateMemoryObjectsEXTFn(1, &id_);
    int dedicated = GL_TRUE;
    api_->glMemoryObjectParameterivEXTFn(id_, GL_DEDICATED_MEMORY_OBJECT_EXT,
                                         &dedicated);
  }
  ~ScopedDedicatedMemoryObject() { api_->glDeleteMemoryObjectsEXTFn(1, &id_); }

  GLuint id() const { return id_; }

 private:
  gl::GLApi* const api_;
  GLuint id_;
};

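// Returns true if the GL representation must use its own texture and copy
// pixels to/from the VkImage, instead of importing the VkImage's memory into
// GL directly.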
bool UseSeparateGLTexture(SharedContextState* context_state,
                          viz::ResourceFormat format) {
  if (!context_state->support_vulkan_external_object())
    return true;

  if (format != viz::ResourceFormat::BGRA_8888)
    return false;

  auto* gl_context = context_state->real_context();
  const auto* version_info = gl_context->GetVersionInfo();
  const auto& ext = gl_context->GetCurrentGL()->Driver->ext;
  if (!ext.b_GL_EXT_texture_format_BGRA8888)
    return true;

  if (!version_info->is_angle)
    return false;

  // If ANGLE is using Vulkan, BGRA8888 textures can be imported without
  // problems.
  if (version_info->is_angle_vulkan)
    return false;

  // ANGLE claims GL_EXT_texture_format_BGRA8888, but glTexStorageMem2DEXT
  // doesn't work correctly with it.
  // TODO(crbug.com/angleproject/4831): fix ANGLE and return false.
  return true;
}

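// Returns true if ANGLE_memory_object_flags is available, which allows
// importing memory with exactly the usage flags the VkImage was created with
// rather than requesting every supported usage.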
bool UseMinimalUsageFlags(SharedContextState* context_state) {
  return context_state->support_gl_external_object_flags();
}

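// Makes |gr_context| wait on |semaphores| without transferring ownership of
// the underlying VkSemaphores.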
void WaitSemaphoresOnGrContext(GrDirectContext* gr_context,
                               std::vector<ExternalSemaphore>* semaphores) {
  DCHECK(!gr_context->abandoned());
  std::vector<GrBackendSemaphore> backend_semaphores;
  backend_semaphores.reserve(semaphores->size());
  for (auto& semaphore : *semaphores) {
    backend_semaphores.emplace_back();
    backend_semaphores.back().initVulkan(semaphore.GetVkSemaphore());
  }
  gr_context->wait(backend_semaphores.size(), backend_semaphores.data(),
                   /*deleteSemaphoreAfterWait=*/false);
}

}  // namespace

// static
std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
    scoped_refptr<SharedContextState> context_state,
    VulkanCommandPool* command_pool,
    const Mailbox& mailbox,
    viz::ResourceFormat format,
    const gfx::Size& size,
    const gfx::ColorSpace& color_space,
    GrSurfaceOrigin surface_origin,
    SkAlphaType alpha_type,
    uint32_t usage,
    const VulkanImageUsageCache* image_usage_cache,
    base::span<const uint8_t> pixel_data,
    bool using_gmb) {
  bool is_external = context_state->support_vulkan_external_object();
  bool is_transfer_dst = using_gmb || !pixel_data.empty() || !is_external;

  auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
  VkFormat vk_format = ToVkFormat(format);
  constexpr auto kUsageNeedsColorAttachment =
      SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER |
      SHARED_IMAGE_USAGE_OOP_RASTERIZATION | SHARED_IMAGE_USAGE_WEBGPU;
  VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT;
  if (usage & kUsageNeedsColorAttachment) {
    vk_usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (format == viz::ETC1) {
      DLOG(ERROR) << "ETC1 format cannot be used as color attachment.";
      return nullptr;
    }
  }

  if (is_transfer_dst)
    vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

  // Requested usage flags must be supported.
  DCHECK_EQ(vk_usage & image_usage_cache->optimal_tiling_usage[format],
            vk_usage);

  if (is_external && (usage & SHARED_IMAGE_USAGE_GLES2)) {
    // All supported image usage flags must be requested when aliasing a GL
    // texture; this is a spec requirement of EXT_memory_object. However, if
    // ANGLE_memory_object_flags is supported, usage flags can be arbitrary.
    if (UseMinimalUsageFlags(context_state.get())) {
      // The following additional usage flags are provided for ANGLE:
      //
      // - TRANSFER_SRC: Used for copies from this image.
      // - TRANSFER_DST: Used for copies to this image or clears.
      vk_usage |=
          VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    } else {
      vk_usage |= image_usage_cache->optimal_tiling_usage[format];
    }
  }

  if (is_external && (usage & SHARED_IMAGE_USAGE_WEBGPU)) {
    // The following additional usage flags are provided for Dawn:
    //
    // - TRANSFER_SRC: Used for copies from this image.
    // - TRANSFER_DST: Used for copies to this image or clears.
    vk_usage |=
        VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
  }

  if (usage & SHARED_IMAGE_USAGE_DISPLAY) {
    // Skia currently requires all VkImages it uses to support transfers.
    vk_usage |=
        VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
  }

  auto* vulkan_implementation =
      context_state->vk_context_provider()->GetVulkanImplementation();
  VkImageCreateFlags vk_flags = 0;

  // In protected mode, mark the image as protected, except when the image
  // needs GLES2 usage but not Raster usage, since ANGLE currently doesn't
  // support protected images. Some clients request both GLES2 and Raster
  // usage (e.g. see GpuMemoryBufferVideoFramePool); in that case still
  // allocate a protected image, which keeps the image usable in general but
  // may not work in some scenarios (e.g. when the video frame is used in
  // WebGL).
  // TODO(https://crbug.com/angleproject/4833)
  if (vulkan_implementation->enforce_protected_memory() &&
      (!(usage & SHARED_IMAGE_USAGE_GLES2) ||
       (usage & SHARED_IMAGE_USAGE_RASTER))) {
    vk_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
  }

  std::unique_ptr<VulkanImage> image;
  if (is_external) {
    image = VulkanImage::CreateWithExternalMemory(device_queue, size, vk_format,
                                                  vk_usage, vk_flags,
                                                  VK_IMAGE_TILING_OPTIMAL);
  } else {
    image = VulkanImage::Create(device_queue, size, vk_format, vk_usage,
                                vk_flags, VK_IMAGE_TILING_OPTIMAL);
  }
  if (!image)
    return nullptr;

  bool use_separate_gl_texture =
      UseSeparateGLTexture(context_state.get(), format);
  auto backing = std::make_unique<ExternalVkImageBacking>(
      util::PassKey<ExternalVkImageBacking>(), mailbox, format, size,
      color_space, surface_origin, alpha_type, usage, std::move(context_state),
      std::move(image), command_pool, use_separate_gl_texture);

  if (!pixel_data.empty()) {
    size_t stride = BitsPerPixel(format) / 8 * size.width();
    backing->WritePixelsWithData(pixel_data, stride);
  }

  return backing;
}

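// Creates a backing for a GpuMemoryBuffer. The GMB handle is imported
// directly as a VkImage when the Vulkan implementation supports it; otherwise
// the contents are kept in shared memory and uploaded on demand.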
// static
std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
    scoped_refptr<SharedContextState> context_state,
    VulkanCommandPool* command_pool,
    const Mailbox& mailbox,
    gfx::GpuMemoryBufferHandle handle,
    gfx::BufferFormat buffer_format,
    const gfx::Size& size,
    const gfx::ColorSpace& color_space,
    GrSurfaceOrigin surface_origin,
    SkAlphaType alpha_type,
    uint32_t usage,
    const VulkanImageUsageCache* image_usage_cache) {
  if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
    DLOG(ERROR) << "Invalid image size for format.";
    return nullptr;
  }

  auto* vulkan_implementation =
      context_state->vk_context_provider()->GetVulkanImplementation();
  auto resource_format = viz::GetResourceFormat(buffer_format);
  if (vulkan_implementation->CanImportGpuMemoryBuffer(handle.type)) {
    auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
    VkFormat vk_format = ToVkFormat(resource_format);
    auto image = vulkan_implementation->CreateImageFromGpuMemoryHandle(
        device_queue, std::move(handle), size, vk_format);
    if (!image) {
      DLOG(ERROR) << "Failed to create VkImage from GpuMemoryHandle.";
      return nullptr;
    }

    bool use_separate_gl_texture =
        UseSeparateGLTexture(context_state.get(), resource_format);
    auto backing = std::make_unique<ExternalVkImageBacking>(
        util::PassKey<ExternalVkImageBacking>(), mailbox, resource_format, size,
        color_space, surface_origin, alpha_type, usage,
        std::move(context_state), std::move(image), command_pool,
        use_separate_gl_texture);
    backing->SetCleared();
    return backing;
  }

  if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) {
    DLOG(ERROR) << "Invalid image format.";
    return nullptr;
  }

  DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);

  SharedMemoryRegionWrapper shared_memory_wrapper;
  if (!shared_memory_wrapper.Initialize(handle, size, resource_format))
    return nullptr;

  auto backing = Create(std::move(context_state), command_pool, mailbox,
                        resource_format, size, color_space, surface_origin,
                        alpha_type, usage, image_usage_cache,
                        base::span<const uint8_t>(), true /* using_gmb */);
  if (!backing)
    return nullptr;

  backing->InstallSharedMemory(std::move(shared_memory_wrapper));
  return backing;
}

ExternalVkImageBacking::ExternalVkImageBacking(
    util::PassKey<ExternalVkImageBacking>,
    const Mailbox& mailbox,
    viz::ResourceFormat format,
    const gfx::Size& size,
    const gfx::ColorSpace& color_space,
    GrSurfaceOrigin surface_origin,
    SkAlphaType alpha_type,
    uint32_t usage,
    scoped_refptr<SharedContextState> context_state,
    std::unique_ptr<VulkanImage> image,
    VulkanCommandPool* command_pool,
    bool use_separate_gl_texture)
    : ClearTrackingSharedImageBacking(mailbox,
                                      format,
                                      size,
                                      color_space,
                                      surface_origin,
                                      alpha_type,
                                      usage,
                                      image->device_size(),
                                      false /* is_thread_safe */),
      context_state_(std::move(context_state)),
      image_(std::move(image)),
      backend_texture_(size.width(),
                       size.height(),
                       CreateGrVkImageInfo(image_.get())),
      command_pool_(command_pool),
      use_separate_gl_texture_(use_separate_gl_texture) {}

ExternalVkImageBacking::~ExternalVkImageBacking() {
  auto semaphores = std::move(read_semaphores_);
  if (write_semaphore_)
    semaphores.emplace_back(std::move(write_semaphore_));

  if (!semaphores.empty() && !context_state()->gr_context()->abandoned()) {
    WaitSemaphoresOnGrContext(context_state()->gr_context(), &semaphores);
    ReturnPendingSemaphoresWithFenceHelper(std::move(semaphores));
  }

  fence_helper()->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(image_));
  backend_texture_ = GrBackendTexture();

  if (texture_) {
    // Ensure that a context is current before removing the ref and calling
    // glDeleteTextures.
    if (!gl::GLContext::GetCurrent())
      context_state()->MakeCurrent(nullptr, true /* need_gl */);
    texture_->RemoveLightweightRef(have_context());
  }

  if (texture_passthrough_) {
    // Ensure that a context is current before releasing |texture_passthrough_|,
    // since doing so calls glDeleteTextures.
    if (!gl::GLContext::GetCurrent())
      context_state()->MakeCurrent(nullptr, true /* need_gl */);
    if (!have_context())
      texture_passthrough_->MarkContextLost();
    texture_passthrough_ = nullptr;
  }
}

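// Called by the representations before the image is accessed. On success,
// |external_semaphores| holds the semaphores the caller must wait on before
// touching the image.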
bool ExternalVkImageBacking::BeginAccess(
    bool readonly,
    std::vector<ExternalSemaphore>* external_semaphores,
    bool is_gl) {
  DLOG_IF(ERROR, gl_reads_in_progress_ != 0 && !is_gl)
      << "Backing is being accessed by both GL and Vulkan.";
  // Nothing needs to be done for the second and subsequent GL read accesses.
  if (is_gl && readonly && gl_reads_in_progress_) {
    ++gl_reads_in_progress_;
    return true;
  }

  if (readonly && !reads_in_progress_) {
    UpdateContent(kInVkImage);
    if (texture_ || texture_passthrough_)
      UpdateContent(kInGLTexture);
  }

  if (gl_reads_in_progress_ && need_synchronization()) {
    // To avoid concurrent read access from both GL and Vulkan, if there is an
    // unfinished GL read access, release the GL texture temporarily; when
    // this Vulkan access is over, re-acquire the GL texture to resume the GL
    // access.
    DCHECK(!is_gl);
    DCHECK(readonly);
    DCHECK(texture_passthrough_ || texture_);

    GLuint texture_id = texture_passthrough_
                            ? texture_passthrough_->service_id()
                            : texture_->service_id();
    if (!gl::GLContext::GetCurrent())
      context_state()->MakeCurrent(/*gl_surface=*/nullptr, /*needs_gl=*/true);

    GrVkImageInfo info;
    auto result = backend_texture_.getVkImageInfo(&info);
    DCHECK(result);
    DCHECK_EQ(info.fCurrentQueueFamily, VK_QUEUE_FAMILY_EXTERNAL);
    DCHECK_NE(info.fImageLayout, VK_IMAGE_LAYOUT_UNDEFINED);
    DCHECK_NE(info.fImageLayout, VK_IMAGE_LAYOUT_PREINITIALIZED);
    auto release_semaphore =
        ExternalVkImageGLRepresentationShared::ReleaseTexture(
            external_semaphore_pool(), texture_id, info.fImageLayout);
    EndAccessInternal(readonly, std::move(release_semaphore));
  }

  if (!BeginAccessInternal(readonly, external_semaphores))
    return false;

  if (!is_gl)
    return true;

  if (need_synchronization() && external_semaphores->empty()) {
    // For the first GL BeginAccess(), |external_semaphores| can be empty,
    // since the Vulkan usage will not have provided a semaphore in an
    // EndAccess() call if ProduceGL*() has never been called. In that case
    // the image layout and queue family are not ready for GL access either,
    // so transition them here and signal a semaphore for GL to wait on.
    auto* gr_context = context_state()->gr_context();
    gr_context->setBackendTextureState(
        backend_texture_,
        GrBackendSurfaceMutableState(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                     VK_QUEUE_FAMILY_EXTERNAL));
    auto semaphore = external_semaphore_pool()->GetOrCreateSemaphore();
    VkSemaphore vk_semaphore = semaphore.GetVkSemaphore();
    GrBackendSemaphore backend_semaphore;
    backend_semaphore.initVulkan(vk_semaphore);
    GrFlushInfo flush_info = {
        .fNumSemaphores = 1,
        .fSignalSemaphores = &backend_semaphore,
    };
    gpu::AddVulkanCleanupTaskForSkiaFlush(
        context_state()->vk_context_provider(), &flush_info);
    auto flush_result = gr_context->flush(flush_info);
    DCHECK_EQ(flush_result, GrSemaphoresSubmitted::kYes);
    gr_context->submit();
    external_semaphores->push_back(std::move(semaphore));
  }

  if (readonly) {
    DCHECK(!gl_reads_in_progress_);
    gl_reads_in_progress_ = 1;
  }
  return true;
}

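// Mirrors BeginAccess(). |external_semaphore| is the semaphore signaled by
// the finished access, which later accesses will wait on.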
void ExternalVkImageBacking::EndAccess(bool readonly,
                                       ExternalSemaphore external_semaphore,
                                       bool is_gl) {
  if (is_gl && readonly) {
    DCHECK(gl_reads_in_progress_);
    if (--gl_reads_in_progress_ > 0) {
      DCHECK(!external_semaphore);
      return;
    }
  }

  EndAccessInternal(readonly, std::move(external_semaphore));
  if (!readonly) {
    if (use_separate_gl_texture()) {
      latest_content_ = is_gl ? kInGLTexture : kInVkImage;
    } else {
      latest_content_ = kInVkImage | kInGLTexture;
    }
  }

  if (gl_reads_in_progress_ && need_synchronization()) {
    // When the Vulkan read access is finished, resume any unfinished GL read
    // access.
    DCHECK(!is_gl);
    DCHECK(readonly);
    DCHECK(texture_passthrough_ || texture_);
    GLuint texture_id = texture_passthrough_
                            ? texture_passthrough_->service_id()
                            : texture_->service_id();
    if (!gl::GLContext::GetCurrent())
      context_state()->MakeCurrent(/*gl_surface=*/nullptr, /*needs_gl=*/true);
    std::vector<ExternalSemaphore> external_semaphores;
    BeginAccessInternal(true, &external_semaphores);
    DCHECK_LE(external_semaphores.size(), 1u);

    for (auto& semaphore : external_semaphores) {
      GrVkImageInfo info;
      auto result = backend_texture_.getVkImageInfo(&info);
      DCHECK(result);
      DCHECK_EQ(info.fCurrentQueueFamily, VK_QUEUE_FAMILY_EXTERNAL);
      DCHECK_NE(info.fImageLayout, VK_IMAGE_LAYOUT_UNDEFINED);
      DCHECK_NE(info.fImageLayout, VK_IMAGE_LAYOUT_PREINITIALIZED);
      ExternalVkImageGLRepresentationShared::AcquireTexture(
          &semaphore, texture_id, info.fImageLayout);
    }
    // |external_semaphores| have been waited on by a GL context, so they
    // cannot be reused until the Vulkan GPU work that depends on the GL task
    // above is over. Add them to the pending semaphores list; they will be
    // returned to the external semaphore pool when the next Skia access is
    // over.
    AddSemaphoresToPendingListOrRelease(std::move(external_semaphores));
  }
}

void ExternalVkImageBacking::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
  DCHECK(!in_fence);
  latest_content_ = kInSharedMemory;
  SetCleared();
}

bool ExternalVkImageBacking::ProduceLegacyMailbox(
    MailboxManager* mailbox_manager) {
  // It is not safe to produce a legacy mailbox because it would bypass the
  // synchronization between Vulkan and GL that is implemented in the
  // representation classes.
  return false;
}

void ExternalVkImageBacking::AddSemaphoresToPendingListOrRelease(
    std::vector<ExternalSemaphore> semaphores) {
  constexpr size_t kMaxPendingSemaphores = 4;
  DCHECK_LE(pending_semaphores_.size(), kMaxPendingSemaphores);

#if DCHECK_IS_ON()
  for (auto& semaphore : semaphores)
    DCHECK(semaphore);
#endif
  while (pending_semaphores_.size() < kMaxPendingSemaphores &&
         !semaphores.empty()) {
    pending_semaphores_.push_back(std::move(semaphores.back()));
    semaphores.pop_back();
  }
  if (!semaphores.empty()) {
    // |semaphores| may contain VkSemaphores which have been submitted to a
    // queue for signalling but have not been signalled yet. In that case, we
    // have to release them via the fence helper to make sure all submitted
    // GPU work is finished before they are destroyed. The unnamed
    // std::vector<ExternalSemaphore> parameter below keeps the semaphores
    // alive until the cleanup task runs.
    fence_helper()->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
        [](scoped_refptr<SharedContextState> shared_context_state,
           std::vector<ExternalSemaphore>, VulkanDeviceQueue* device_queue,
           bool device_lost) {
          if (!gl::GLContext::GetCurrent()) {
            shared_context_state->MakeCurrent(/*surface=*/nullptr,
                                              /*needs_gl=*/true);
          }
        },
        context_state_, std::move(semaphores)));
  }
}

scoped_refptr<gfx::NativePixmap> ExternalVkImageBacking::GetNativePixmap() {
  return image_->native_pixmap();
}

void ExternalVkImageBacking::ReturnPendingSemaphoresWithFenceHelper(
    std::vector<ExternalSemaphore> semaphores) {
  std::move(semaphores.begin(), semaphores.end(),
            std::back_inserter(pending_semaphores_));
  external_semaphore_pool()->ReturnSemaphoresWithFenceHelper(
      std::move(pending_semaphores_));
  pending_semaphores_.clear();
}

std::unique_ptr<SharedImageRepresentationDawn>
ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager,
                                    MemoryTypeTracker* tracker,
                                    WGPUDevice wgpuDevice) {
#if (defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)) && \
    BUILDFLAG(USE_DAWN)
  auto wgpu_format = viz::ToWGPUFormat(format());

  if (wgpu_format == WGPUTextureFormat_Undefined) {
    DLOG(ERROR) << "Format not supported for Dawn";
    return nullptr;
  }

  GrVkImageInfo image_info;
  bool result = backend_texture_.getVkImageInfo(&image_info);
  DCHECK(result);

  auto memory_fd = image_->GetMemoryFd();
  if (!memory_fd.is_valid()) {
    return nullptr;
  }

  return std::make_unique<ExternalVkImageDawnRepresentation>(
      manager, this, tracker, wgpuDevice, wgpu_format, std::move(memory_fd));
#else  // (!defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(OS_BSD)) ||
       // !BUILDFLAG(USE_DAWN)
  NOTIMPLEMENTED_LOG_ONCE();
  return nullptr;
#endif
}

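// Creates the GL texture for this image. In separate-texture mode this is a
// plain immutable texture; otherwise the VkImage's memory is imported into GL
// through EXT_memory_object and the texture's storage is bound to it.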
GLuint ExternalVkImageBacking::ProduceGLTextureInternal() {
  GrVkImageInfo image_info;
  bool result = backend_texture_.getVkImageInfo(&image_info);
  DCHECK(result);
  gl::GLApi* api = gl::g_current_gl_context;
  base::Optional<ScopedDedicatedMemoryObject> memory_object;
  if (!use_separate_gl_texture()) {
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID) || \
    defined(OS_BSD)
    auto memory_fd = image_->GetMemoryFd();
    if (!memory_fd.is_valid())
      return 0;
    memory_object.emplace(api);
    api->glImportMemoryFdEXTFn(memory_object->id(), image_info.fAlloc.fSize,
                               GL_HANDLE_TYPE_OPAQUE_FD_EXT,
                               memory_fd.release());
#elif defined(OS_WIN)
    auto memory_handle = image_->GetMemoryHandle();
    if (!memory_handle.IsValid()) {
      return 0;
    }
    memory_object.emplace(api);
    api->glImportMemoryWin32HandleEXTFn(
        memory_object->id(), image_info.fAlloc.fSize,
        GL_HANDLE_TYPE_OPAQUE_WIN32_EXT, memory_handle.Take());
#elif defined(OS_FUCHSIA)
    zx::vmo vmo = image_->GetMemoryZirconHandle();
    if (!vmo)
      return 0;
    memory_object.emplace(api);
    api->glImportMemoryZirconHandleANGLEFn(
        memory_object->id(), image_info.fAlloc.fSize,
        GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE, vmo.release());
#else
#error Unsupported OS
#endif
  }

  GLuint internal_format = viz::TextureStorageFormat(format());
  GLuint texture_service_id = 0;
  api->glGenTexturesFn(1, &texture_service_id);
  gl::ScopedTextureBinder scoped_texture_binder(GL_TEXTURE_2D,
                                                texture_service_id);
  api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  if (use_separate_gl_texture()) {
    DCHECK(!memory_object);
    api->glTexStorage2DEXTFn(GL_TEXTURE_2D, 1, internal_format, size().width(),
                             size().height());
  } else {
    DCHECK(memory_object);
    // If ANGLE_memory_object_flags is supported, use that to communicate the
    // exact create and usage flags the image was created with.
    DCHECK(image_->usage() != 0);
    if (UseMinimalUsageFlags(context_state())) {
      api->glTexStorageMemFlags2DANGLEFn(
          GL_TEXTURE_2D, 1, internal_format, size().width(), size().height(),
          memory_object->id(), 0, image_->flags(), image_->usage());
    } else {
      api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
                                  size().width(), size().height(),
                                  memory_object->id(), 0);
    }
  }

  return texture_service_id;
}

std::unique_ptr<SharedImageRepresentationGLTexture>
ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
                                         MemoryTypeTracker* tracker) {
  DCHECK(!texture_passthrough_);
  if (!(usage() & SHARED_IMAGE_USAGE_GLES2)) {
    DLOG(ERROR) << "The backing is not created with GLES2 usage.";
    return nullptr;
  }

  if (!texture_) {
    GLuint texture_service_id = ProduceGLTextureInternal();
    if (!texture_service_id)
      return nullptr;
    GLuint internal_format = viz::TextureStorageFormat(format());
    GLenum gl_format = viz::GLDataFormat(format());
    GLenum gl_type = viz::GLDataType(format());

    texture_ = new gles2::Texture(texture_service_id);
    texture_->SetLightweightRef();
    texture_->SetTarget(GL_TEXTURE_2D, 1);
    texture_->set_min_filter(GL_LINEAR);
    texture_->set_mag_filter(GL_LINEAR);
    texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
    texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
    // If the backing is already cleared, no need to clear it again.
    gfx::Rect cleared_rect;
    if (IsCleared())
      cleared_rect = gfx::Rect(size());

    texture_->SetLevelInfo(GL_TEXTURE_2D, 0, internal_format, size().width(),
                           size().height(), 1, 0, gl_format, gl_type,
                           cleared_rect);
    texture_->SetImmutable(true, true);
  }
  return std::make_unique<ExternalVkImageGLRepresentation>(
      manager, this, tracker, texture_, texture_->service_id());
}

std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ExternalVkImageBacking::ProduceGLTexturePassthrough(
    SharedImageManager* manager,
    MemoryTypeTracker* tracker) {
  DCHECK(!texture_);
  if (!(usage() & SHARED_IMAGE_USAGE_GLES2)) {
    DLOG(ERROR) << "The backing is not created with GLES2 usage.";
    return nullptr;
  }

  if (!texture_passthrough_) {
    GLuint texture_service_id = ProduceGLTextureInternal();
    if (!texture_service_id)
      return nullptr;
    GLuint internal_format = viz::TextureStorageFormat(format());
    GLenum gl_format = viz::GLDataFormat(format());
    GLenum gl_type = viz::GLDataType(format());

    texture_passthrough_ = base::MakeRefCounted<gpu::gles2::TexturePassthrough>(
        texture_service_id, GL_TEXTURE_2D, internal_format, size().width(),
        size().height(),
        /*depth=*/1, /*border=*/0, gl_format, gl_type);
  }

  return std::make_unique<ExternalVkImageGLPassthroughRepresentation>(
      manager, this, tracker, texture_passthrough_->service_id());
}

std::unique_ptr<SharedImageRepresentationSkia>
ExternalVkImageBacking::ProduceSkia(
    SharedImageManager* manager,
    MemoryTypeTracker* tracker,
    scoped_refptr<SharedContextState> context_state) {
  // This backing type is only used when vulkan is enabled, so SkiaRenderer
  // should also be using Vulkan.
  DCHECK_EQ(context_state_, context_state);
  DCHECK(context_state->GrContextIsVulkan());
  return std::make_unique<ExternalVkImageSkiaRepresentation>(manager, this,
                                                             tracker);
}

std::unique_ptr<SharedImageRepresentationOverlay>
ExternalVkImageBacking::ProduceOverlay(SharedImageManager* manager,
                                       MemoryTypeTracker* tracker) {
  return std::make_unique<ExternalVkImageOverlayRepresentation>(manager, this,
                                                                tracker);
}

void ExternalVkImageBacking::InstallSharedMemory(
    SharedMemoryRegionWrapper shared_memory_wrapper) {
  DCHECK(!shared_memory_wrapper_.IsValid());
  DCHECK(shared_memory_wrapper.IsValid());
  shared_memory_wrapper_ = std::move(shared_memory_wrapper);
  Update(nullptr);
}

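// Ensures the content location named by |content_flags| holds the latest
// pixels, copying from whichever location (shared memory, GL texture, or
// VkImage) currently has them.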
void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
  // Only one content location is supported per call for now.
  DCHECK(content_flags == kInVkImage || content_flags == kInGLTexture ||
         content_flags == kInSharedMemory);

  if ((latest_content_ & content_flags) == content_flags)
    return;

  if (content_flags == kInGLTexture && !use_separate_gl_texture())
    content_flags = kInVkImage;

  if (content_flags == kInVkImage) {
    if (latest_content_ & kInSharedMemory) {
      if (!shared_memory_wrapper_.IsValid())
        return;
      if (!WritePixels())
        return;
      latest_content_ |=
          use_separate_gl_texture() ? kInVkImage : kInVkImage | kInGLTexture;
      return;
    }
    if ((latest_content_ & kInGLTexture) && use_separate_gl_texture()) {
      CopyPixelsFromGLTextureToVkImage();
      latest_content_ |= kInVkImage;
      return;
    }
  } else if (content_flags == kInGLTexture) {
    DCHECK(use_separate_gl_texture());
    if (latest_content_ & kInSharedMemory) {
      CopyPixelsFromShmToGLTexture();
    } else if (latest_content_ & kInVkImage) {
      NOTIMPLEMENTED_LOG_ONCE();
    }
  } else if (content_flags == kInSharedMemory) {
    // TODO(penghuang): read pixels back from the VkImage to the shared memory
    // GMB, if this feature is needed.
    NOTIMPLEMENTED_LOG_ONCE();
  }
}

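// Writes pixels into the VkImage through a host-visible staging buffer:
// |callback| fills the buffer, then a one-time command buffer copies it into
// the image.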
bool ExternalVkImageBacking::WritePixelsWithCallback(
    size_t data_size,
    size_t stride,
    FillBufferCallback callback) {
  DCHECK(stride == 0 || size().height() * stride <= data_size);

  VkBufferCreateInfo buffer_create_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = data_size,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
  };

  VmaAllocator allocator =
      context_state()->vk_context_provider()->GetDeviceQueue()->vma_allocator();
  VkBuffer stage_buffer = VK_NULL_HANDLE;
  VmaAllocation stage_allocation = VK_NULL_HANDLE;
  VkResult result = vma::CreateBuffer(allocator, &buffer_create_info,
                                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                      0, &stage_buffer, &stage_allocation);
  if (result != VK_SUCCESS) {
    DLOG(ERROR) << "vkCreateBuffer() failed. " << result;
    return false;
  }

  void* buffer = nullptr;
  result = vma::MapMemory(allocator, stage_allocation, &buffer);
  if (result != VK_SUCCESS) {
    DLOG(ERROR) << "vma::MapMemory() failed. " << result;
    vma::DestroyBuffer(allocator, stage_buffer, stage_allocation);
    return false;
  }

  std::move(callback).Run(buffer);
  vma::UnmapMemory(allocator, stage_allocation);

  std::vector<ExternalSemaphore> external_semaphores;
  if (!BeginAccessInternal(false /* readonly */, &external_semaphores)) {
    DLOG(ERROR) << "BeginAccess() failed.";
    vma::DestroyBuffer(allocator, stage_buffer, stage_allocation);
    return false;
  }

  auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
  CHECK(command_buffer);
  {
    ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
    GrVkImageInfo image_info;
    bool success = backend_texture_.getVkImageInfo(&image_info);
    DCHECK(success);
    if (image_info.fImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
      command_buffer->TransitionImageLayout(
          image_info.fImage, image_info.fImageLayout,
          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      backend_texture_.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    }
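    // |stride| is in bytes; CopyBufferToImage() takes the buffer row length
    // in pixels, so convert using the format's bits per pixel.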
    uint32_t buffer_width =
        stride ? stride * 8 / BitsPerPixel(format()) : size().width();
    command_buffer->CopyBufferToImage(stage_buffer, image_info.fImage,
                                      buffer_width, size().height(),
                                      size().width(), size().height());
  }

  SetCleared();

  if (!need_synchronization()) {
    DCHECK(external_semaphores.empty());
    command_buffer->Submit(0, nullptr, 0, nullptr);
    EndAccessInternal(false /* readonly */, ExternalSemaphore());

    fence_helper()->EnqueueVulkanObjectCleanupForSubmittedWork(
        std::move(command_buffer));
    fence_helper()->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
                                                         stage_allocation);
    return true;
  }

  std::vector<VkSemaphore> begin_access_semaphores;
  begin_access_semaphores.reserve(external_semaphores.size());
  for (auto& external_semaphore : external_semaphores) {
    begin_access_semaphores.emplace_back(external_semaphore.GetVkSemaphore());
  }

  auto end_access_semaphore = external_semaphore_pool()->GetOrCreateSemaphore();
  VkSemaphore vk_end_access_semaphore = end_access_semaphore.GetVkSemaphore();
  command_buffer->Submit(begin_access_semaphores.size(),
                         begin_access_semaphores.data(), 1,
                         &vk_end_access_semaphore);

  EndAccessInternal(false /* readonly */, std::move(end_access_semaphore));
  // |external_semaphores| have been waited on and can be reused when submitted
  // GPU work is done.
  ReturnPendingSemaphoresWithFenceHelper(std::move(external_semaphores));

  fence_helper()->EnqueueVulkanObjectCleanupForSubmittedWork(
      std::move(command_buffer));
  fence_helper()->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
                                                       stage_allocation);
  return true;
}

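// Uploads |pixel_data| into the VkImage via Skia's updateBackendTexture().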
bool ExternalVkImageBacking::WritePixelsWithData(
    base::span<const uint8_t> pixel_data,
    size_t stride) {
  std::vector<ExternalSemaphore> external_semaphores;
  if (!BeginAccessInternal(false /* readonly */, &external_semaphores)) {
    DLOG(ERROR) << "BeginAccess() failed.";
    return false;
  }
  auto* gr_context = context_state_->gr_context();
  WaitSemaphoresOnGrContext(gr_context, &external_semaphores);

  auto info = SkImageInfo::Make(size().width(), size().height(),
                                ResourceFormatToClosestSkColorType(
                                    /*gpu_compositing=*/true, format()),
                                kOpaque_SkAlphaType);
  SkPixmap pixmap(info, pixel_data.data(), stride);
  if (!gr_context->updateBackendTexture(backend_texture_, &pixmap,
                                        /*levels=*/1, nullptr, nullptr)) {
    DLOG(ERROR) << "updateBackendTexture() failed.";
  }

  if (!need_synchronization()) {
    DCHECK(external_semaphores.empty());
    EndAccessInternal(false /* readonly */, ExternalSemaphore());
    return true;
  }

  gr_context->flush({});
  gr_context->setBackendTextureState(
      backend_texture_,
      GrBackendSurfaceMutableState(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                   VK_QUEUE_FAMILY_EXTERNAL));

  auto end_access_semaphore = external_semaphore_pool()->GetOrCreateSemaphore();
  VkSemaphore vk_end_access_semaphore = end_access_semaphore.GetVkSemaphore();
  GrBackendSemaphore end_access_backend_semaphore;
  end_access_backend_semaphore.initVulkan(vk_end_access_semaphore);
  GrFlushInfo flush_info = {
      .fNumSemaphores = 1,
      .fSignalSemaphores = &end_access_backend_semaphore,
  };
  gr_context->flush(flush_info);

  // Submit so the |end_access_semaphore| is ready for waiting.
  gr_context->submit();

  EndAccessInternal(false /* readonly */, std::move(end_access_semaphore));
  // |external_semaphores| have been waited on and can be reused when submitted
  // GPU work is done.
  ReturnPendingSemaphoresWithFenceHelper(std::move(external_semaphores));
  return true;
}

bool ExternalVkImageBacking::WritePixels() {
  return WritePixelsWithData(shared_memory_wrapper_.GetMemoryAsSpan(),
                             shared_memory_wrapper_.GetStride());
}

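// Copies pixels from the separate GL texture into the VkImage by reading them
// back with glReadPixels() and then writing them through the staging-buffer
// path.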
void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
  DCHECK(use_separate_gl_texture());
  DCHECK_NE(!!texture_, !!texture_passthrough_);
  const GLuint texture_service_id =
      texture_ ? texture_->service_id() : texture_passthrough_->service_id();

  DCHECK_GE(format(), 0);
  DCHECK_LE(format(), viz::RESOURCE_FORMAT_MAX);
  auto gl_format = kFormatTable[format()].gl_format;
  auto gl_type = kFormatTable[format()].gl_type;
  auto bytes_per_pixel = kFormatTable[format()].bytes_per_pixel;

  if (gl_format == GL_ZERO) {
    NOTREACHED() << "Unsupported resource format=" << format();
    return;
  }

  // Make sure the GrContext is not using GL, so we don't need to reset it.
  DCHECK(!context_state_->GrContextIsGL());

  // Make sure a GL context is current. Since textures are shared between all
  // GL contexts, it doesn't matter which one is current.
  if (!gl::GLContext::GetCurrent() &&
      !context_state_->MakeCurrent(nullptr, true /* needs_gl */))
    return;

  gl::GLApi* api = gl::g_current_gl_context;
  GLuint framebuffer;
  GLint old_framebuffer;
  api->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &old_framebuffer);
  api->glGenFramebuffersEXTFn(1, &framebuffer);
  api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, framebuffer);
  api->glFramebufferTexture2DEXTFn(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                   GL_TEXTURE_2D, texture_service_id, 0);
  GLenum status = api->glCheckFramebufferStatusEXTFn(GL_READ_FRAMEBUFFER);
  DCHECK_EQ(status, static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE))
      << "CheckFramebufferStatusEXT() failed.";

  base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
  checked_size *= size().width();
  checked_size *= size().height();
  DCHECK(checked_size.IsValid());

  ScopedPixelStore pack_row_length(api, GL_PACK_ROW_LENGTH, 0);
  ScopedPixelStore pack_skip_pixels(api, GL_PACK_SKIP_PIXELS, 0);
  ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0);
  ScopedPixelStore pack_alignment(api, GL_PACK_ALIGNMENT, 1);

  WritePixelsWithCallback(
      checked_size.ValueOrDie(), 0,
      base::BindOnce(
          [](gl::GLApi* api, const gfx::Size& size, GLenum format, GLenum type,
             void* buffer) {
            api->glReadPixelsFn(0, 0, size.width(), size.height(), format, type,
                                buffer);
            DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
          },
          api, size(), gl_format, gl_type));
  api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer);
  api->glDeleteFramebuffersEXTFn(1, &framebuffer);
}

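// Uploads pixels from the shared-memory GMB into the separate GL texture with
// glTexSubImage2D().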
void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
  DCHECK(use_separate_gl_texture());
  DCHECK_NE(!!texture_, !!texture_passthrough_);
  const GLuint texture_service_id =
      texture_ ? texture_->service_id() : texture_passthrough_->service_id();

  DCHECK_GE(format(), 0);
  DCHECK_LE(format(), viz::RESOURCE_FORMAT_MAX);
  auto gl_format = kFormatTable[format()].gl_format;
  auto gl_type = kFormatTable[format()].gl_type;
  auto bytes_per_pixel = kFormatTable[format()].bytes_per_pixel;

  if (gl_format == GL_ZERO) {
    NOTREACHED() << "Unsupported resource format=" << format();
    return;
  }

  // Make sure the GrContext is not using GL, so we don't need to reset it.
  DCHECK(!context_state_->GrContextIsGL());

  // Make sure a GL context is current. Since textures are shared between all
  // GL contexts, it doesn't matter which one is current.
  if (!gl::GLContext::GetCurrent() &&
      !context_state_->MakeCurrent(nullptr, true /* needs_gl */))
    return;

  gl::GLApi* api = gl::g_current_gl_context;
  GLint old_texture;
  api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture);
  api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id);

  base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
  checked_size *= size().width();
  checked_size *= size().height();
  DCHECK(checked_size.IsValid());

  auto pixel_data = shared_memory_wrapper_.GetMemoryAsSpan();
  api->glTexSubImage2DFn(GL_TEXTURE_2D, 0, 0, 0, size().width(),
                         size().height(), gl_format, gl_type,
                         pixel_data.data());
  DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
  api->glBindTextureFn(GL_TEXTURE_2D, old_texture);
}

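// Grants access and hands back the semaphores the new access must wait on:
// all outstanding read semaphores plus the semaphore from the last write.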
bool ExternalVkImageBacking::BeginAccessInternal(
    bool readonly,
    std::vector<ExternalSemaphore>* external_semaphores) {
  DCHECK(external_semaphores);
  DCHECK(external_semaphores->empty());
  if (is_write_in_progress_) {
    DLOG(ERROR) << "Unable to begin read or write access because another write "
                   "access is in progress";
    return false;
  }

  if (reads_in_progress_ && !readonly) {
    DLOG(ERROR)
        << "Unable to begin write access because a read access is in progress";
    return false;
  }

  if (readonly) {
    DLOG_IF(ERROR, reads_in_progress_)
        << "Concurrent reading may cause problems.";
    ++reads_in_progress_;
    // If a shared image is read repeatedly without any write access,
    // |read_semaphores_| will never be consumed and released, and Chrome will
    // eventually run out of file descriptors. To avoid this problem, we wait
    // on read semaphores for readonly access too. In most cases a shared
    // image is only read from one Vulkan device queue, so this should not
    // have a performance impact.
    // TODO(penghuang): avoid waiting on read semaphores.
    *external_semaphores = std::move(read_semaphores_);
    read_semaphores_.clear();

    // A semaphore becomes unsignaled after it has been signaled and waited
    // on, so it is not safe to reuse it here.
    if (write_semaphore_)
      external_semaphores->push_back(std::move(write_semaphore_));
  } else {
    is_write_in_progress_ = true;
    *external_semaphores = std::move(read_semaphores_);
    read_semaphores_.clear();
    if (write_semaphore_)
      external_semaphores->push_back(std::move(write_semaphore_));
  }
  return true;
}

void ExternalVkImageBacking::EndAccessInternal(
    bool readonly,
    ExternalSemaphore external_semaphore) {
  if (readonly) {
    DCHECK_GT(reads_in_progress_, 0u);
    --reads_in_progress_;
  } else {
    DCHECK(is_write_in_progress_);
    is_write_in_progress_ = false;
  }

  if (need_synchronization()) {
    DCHECK(!is_write_in_progress_);
    DCHECK(external_semaphore);
    if (readonly) {
      read_semaphores_.push_back(std::move(external_semaphore));
    } else {
      DCHECK(!write_semaphore_);
      DCHECK(read_semaphores_.empty());
      write_semaphore_ = std::move(external_semaphore);
    }
  } else {
    DCHECK(!external_semaphore);
  }
}

}  // namespace gpu