1 // Copyright 2018 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/viz/service/display_embedder/skia_output_surface_impl.h"
6
7 #include <memory>
8 #include <utility>
9 #include <vector>
10
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/callback_helpers.h"
14 #include "base/no_destructor.h"
15 #include "base/synchronization/waitable_event.h"
16 #include "base/threading/thread_task_runner_handle.h"
17 #include "build/build_config.h"
18 #include "components/viz/common/frame_sinks/begin_frame_source.h"
19 #include "components/viz/common/frame_sinks/copy_output_request.h"
20 #include "components/viz/common/frame_sinks/copy_output_util.h"
21 #include "components/viz/common/resources/resource_format_utils_vulkan.h"
22 #include "components/viz/service/display/output_surface_client.h"
23 #include "components/viz/service/display/output_surface_frame.h"
24 #include "components/viz/service/display/overlay_candidate.h"
25 #include "components/viz/service/display_embedder/image_context_impl.h"
26 #include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
27 #include "components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h"
28 #include "gpu/command_buffer/common/swap_buffers_complete_params.h"
29 #include "gpu/command_buffer/service/scheduler.h"
30 #include "gpu/command_buffer/service/shared_image_representation.h"
31 #include "gpu/command_buffer/service/skia_utils.h"
32 #include "gpu/ipc/service/context_url.h"
33 #include "gpu/ipc/single_task_sequence.h"
34 #include "gpu/vulkan/buildflags.h"
35 #include "skia/buildflags.h"
36 #include "ui/gfx/skia_util.h"
37 #include "ui/gl/gl_context.h"
38 #include "ui/gl/gl_gl_api_implementation.h"
39
40 #if BUILDFLAG(ENABLE_VULKAN)
41 #include "components/viz/common/gpu/vulkan_context_provider.h"
42 #include "gpu/vulkan/vulkan_device_queue.h"
43 #endif // BUILDFLAG(ENABLE_VULKAN)
44
45 #if defined(OS_WIN)
46 #include "components/viz/service/display/dc_layer_overlay.h"
47 #endif
48
49 namespace viz {
50
51 namespace {
52
Fulfill(void * texture_context)53 sk_sp<SkPromiseImageTexture> Fulfill(void* texture_context) {
54 DCHECK(texture_context);
55 auto* image_context = static_cast<ImageContextImpl*>(texture_context);
56 return sk_ref_sp(image_context->promise_image_texture());
57 }
58
// No-op release/done callback for promise images; resource cleanup is owned
// by the ImageContextImpl lifetime, not by Skia's callbacks.
void DoNothing(void* texture_context) {}
60
// Returns the process-wide ContextUrl used to attribute SkiaRenderer GPU work
// (lazily constructed once, intentionally never destroyed).
gpu::ContextUrl& GetActiveUrl() {
  static base::NoDestructor<gpu::ContextUrl> active_url(
      GURL("chrome://gpu/SkiaRenderer"));
  return *active_url;
}
66
GetOutputSurfaceType(SkiaOutputSurfaceDependency * deps)67 OutputSurface::Type GetOutputSurfaceType(SkiaOutputSurfaceDependency* deps) {
68 // TODO(penghuang): Support more types.
69 return deps->IsUsingVulkan() ? OutputSurface::Type::kVulkan
70 : OutputSurface::Type::kOpenGL;
71 }
72
73 } // namespace
74
// ScopedPaint for the root render pass: records into the shared
// |root_recorder| and uses render pass id 0 to mean "the frame itself".
SkiaOutputSurfaceImpl::ScopedPaint::ScopedPaint(
    SkDeferredDisplayListRecorder* root_recorder)
    : recorder_(root_recorder), render_pass_id_(0) {}
78
// ScopedPaint for an offscreen render pass: owns a dedicated recorder built
// from |characterization| rather than borrowing the root recorder.
SkiaOutputSurfaceImpl::ScopedPaint::ScopedPaint(
    SkSurfaceCharacterization characterization,
    RenderPassId render_pass_id)
    : render_pass_id_(render_pass_id) {
  recorder_storage_.emplace(characterization);
  recorder_ = &recorder_storage_.value();
}
86
// Default destruction: drops the owned recorder (if any) with it.
SkiaOutputSurfaceImpl::ScopedPaint::~ScopedPaint() = default;
88
89 // static
Create(std::unique_ptr<SkiaOutputSurfaceDependency> deps,const RendererSettings & renderer_settings)90 std::unique_ptr<SkiaOutputSurface> SkiaOutputSurfaceImpl::Create(
91 std::unique_ptr<SkiaOutputSurfaceDependency> deps,
92 const RendererSettings& renderer_settings) {
93 auto output_surface = std::make_unique<SkiaOutputSurfaceImpl>(
94 util::PassKey<SkiaOutputSurfaceImpl>(), std::move(deps),
95 renderer_settings);
96 if (!output_surface->Initialize())
97 output_surface = nullptr;
98 return output_surface;
99 }
100
// Construction only wires up the dependency and settings; all GPU-side state
// is created later in Initialize(). PassKey restricts construction to
// Create() while keeping std::make_unique usable.
SkiaOutputSurfaceImpl::SkiaOutputSurfaceImpl(
    util::PassKey<SkiaOutputSurfaceImpl> /* pass_key */,
    std::unique_ptr<SkiaOutputSurfaceDependency> deps,
    const RendererSettings& renderer_settings)
    : SkiaOutputSurface(GetOutputSurfaceType(deps.get())),
      dependency_(std::move(deps)),
      renderer_settings_(renderer_settings) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
}
110
~SkiaOutputSurfaceImpl()111 SkiaOutputSurfaceImpl::~SkiaOutputSurfaceImpl() {
112 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
113 current_paint_.reset();
114 root_recorder_.reset();
115
116 if (!render_pass_image_cache_.empty()) {
117 std::vector<RenderPassId> render_pass_ids;
118 render_pass_ids.reserve(render_pass_ids.size());
119 for (auto& entry : render_pass_image_cache_)
120 render_pass_ids.push_back(entry.first);
121 RemoveRenderPassResource(std::move(render_pass_ids));
122 }
123 DCHECK(render_pass_image_cache_.empty());
124
125 // Post a task to destroy |impl_on_gpu_| on the GPU thread and block until
126 // that is finished.
127 base::WaitableEvent event;
128 auto task = base::BindOnce(
129 [](std::unique_ptr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu,
130 base::WaitableEvent* event) {
131 impl_on_gpu.reset();
132 event->Signal();
133 },
134 std::move(impl_on_gpu_), &event);
135 ScheduleGpuTask(std::move(task), {});
136 event.Wait();
137
138 gpu_task_scheduler_.reset();
139 }
140
// Forwards to the dependency, which owns the platform surface handle.
gpu::SurfaceHandle SkiaOutputSurfaceImpl::GetSurfaceHandle() const {
  return dependency_->GetSurfaceHandle();
}
144
// Registers the (single, non-null) client; may only be called once.
void SkiaOutputSurfaceImpl::BindToClient(OutputSurfaceClient* client) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(client);
  DCHECK(!client_);
  client_ = client;
}
151
// Intentionally a no-op for the Skia path.
void SkiaOutputSurfaceImpl::BindFramebuffer() {
  // TODO(penghuang): remove this method when GLRenderer is removed.
}
155
// Records the DC-layer draw rectangle for the current frame. Only the first
// call per frame takes effect; the stored rect is consumed when the frame's
// deferred draw closure is built (see SubmitPaint).
void SkiaOutputSurfaceImpl::SetDrawRectangle(const gfx::Rect& draw_rectangle) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(capabilities().supports_dc_layers);

  if (has_set_draw_rectangle_for_frame_)
    return;

  // TODO(kylechar): Add a check that |draw_rectangle| is the full size of the
  // framebuffer the next time this is called after Reshape().

  draw_rectangle_.emplace(draw_rectangle);
  has_set_draw_rectangle_for_frame_ = true;
}
169
// Asks the GPU-side impl to (re)allocate the backbuffer. Uses
// ScheduleOrRetainGpuTask so the request survives scheduler reordering.
void SkiaOutputSurfaceImpl::EnsureBackbuffer() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::EnsureBackbuffer,
                                 base::Unretained(impl_on_gpu_.get()));
  gpu_task_scheduler_->ScheduleOrRetainGpuTask(std::move(callback), {});
}
178
// Counterpart of EnsureBackbuffer(): asks the GPU-side impl to drop the
// backbuffer so its memory can be reclaimed.
void SkiaOutputSurfaceImpl::DiscardBackbuffer() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::DiscardBackbuffer,
                                 base::Unretained(impl_on_gpu_.get()));
  gpu_task_scheduler_->ScheduleOrRetainGpuTask(std::move(callback), {});
}
187
// Rebuilds |root_recorder_| from the current surface characterization.
// Called after Reshape() and after each swap, since detaching a DDL leaves
// the recorder unusable.
void SkiaOutputSurfaceImpl::RecreateRootRecorder() {
  DCHECK(characterization_.isValid());
  root_recorder_.emplace(characterization_);

  // This will trigger the lazy initialization of the recorder
  ignore_result(root_recorder_->getCanvas());
}
195
// Resizes the output surface. Updates local bookkeeping (full damage on all
// tracked buffers, new characterization and root recorder) and forwards the
// resize to the GPU thread.
void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
                                    float device_scale_factor,
                                    const gfx::ColorSpace& color_space,
                                    gfx::BufferFormat format,
                                    bool use_stencil) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(!size.IsEmpty());

  // SetDrawRectangle() will need to be called at the new size.
  has_set_draw_rectangle_for_frame_ = false;

  // Reshape will damage all buffers.
  current_buffer_ = 0u;
  for (auto& damage : damage_of_buffers_)
    damage = gfx::Rect(size);

  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::Reshape,
                             base::Unretained(impl_on_gpu_.get()), size,
                             device_scale_factor, color_space, format,
                             use_stencil, pre_transform_);
  ScheduleGpuTask(std::move(task), {});

  color_space_ = color_space;
  is_hdr_ = color_space_.IsHDR();
  size_ = size;
  characterization_ = CreateSkSurfaceCharacterization(
      size, GetResourceFormat(format), false /* mipmap */,
      color_space_.ToSkColorSpace(), true /* is_root_render_pass */);
  RecreateRootRecorder();
}
228
// Stores the callback invoked when vsync parameters change.
void SkiaOutputSurfaceImpl::SetUpdateVSyncParametersCallback(
    UpdateVSyncParametersCallback callback) {
  update_vsync_parameters_callback_ = std::move(callback);
}
233
// Forwards the GPU-vsync enable/disable request to the GPU thread.
void SkiaOutputSurfaceImpl::SetGpuVSyncEnabled(bool enabled) {
  auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SetGpuVSyncEnabled,
                             base::Unretained(impl_on_gpu_.get()), enabled);
  gpu_task_scheduler_->ScheduleOrRetainGpuTask(std::move(task), {});
}
239
// Stores the callback invoked on each GPU vsync (see OnGpuVSync plumbing in
// Initialize()).
void SkiaOutputSurfaceImpl::SetGpuVSyncCallback(GpuVSyncCallback callback) {
  gpu_vsync_callback_ = std::move(callback);
}
243
// Records the display pre-transform hint, but only when the surface actually
// supports pre-transform; otherwise the hint is ignored.
void SkiaOutputSurfaceImpl::SetDisplayTransformHint(
    gfx::OverlayTransform transform) {
  if (capabilities_.supports_pre_transform)
    pre_transform_ = transform;
}
249
// Returns the currently applied pre-transform.
gfx::OverlayTransform SkiaOutputSurfaceImpl::GetDisplayTransform() {
  return pre_transform_;
}
253
// Begins painting the root render pass and returns the canvas to draw into.
// With overdraw feedback enabled, returns an SkNWayCanvas that mirrors draws
// into both the root recorder and a separate overdraw-tracking surface.
SkCanvas* SkiaOutputSurfaceImpl::BeginPaintCurrentFrame() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // Make sure there is no unsubmitted PaintFrame or PaintRenderPass.
  DCHECK(!current_paint_);
  DCHECK(root_recorder_);

  current_paint_.emplace(&root_recorder_.value());

  if (!renderer_settings_.show_overdraw_feedback)
    return current_paint_->recorder()->getCanvas();

  DCHECK(!overdraw_surface_recorder_);
  DCHECK(renderer_settings_.show_overdraw_feedback);

  // The overdraw surface mirrors the root surface's dimensions and color
  // space but is never the root render pass itself.
  SkSurfaceCharacterization characterization = CreateSkSurfaceCharacterization(
      gfx::Size(characterization_.width(), characterization_.height()),
      BGRA_8888, false /* mipmap */, characterization_.refColorSpace(),
      false /* is_root_render_pass */);
  overdraw_surface_recorder_.emplace(characterization);
  overdraw_canvas_.emplace((overdraw_surface_recorder_->getCanvas()));

  nway_canvas_.emplace(characterization_.width(), characterization_.height());
  nway_canvas_->addCanvas(current_paint_->recorder()->getCanvas());
  nway_canvas_->addCanvas(&overdraw_canvas_.value());
  return &nway_canvas_.value();
}
280
// Creates (or reuses) a promise SkImage for |image_context| in the current
// paint, and tracks the context so it is fulfilled on the GPU thread. Any
// pending sync token on the mailbox is moved into |resource_sync_tokens_| so
// the scheduled GPU task waits on it.
void SkiaOutputSurfaceImpl::MakePromiseSkImage(ImageContext* image_context) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(current_paint_);
  DCHECK(!image_context->mailbox_holder().mailbox.IsZero());

  images_in_current_paint_.push_back(
      static_cast<ImageContextImpl*>(image_context));

  // Reuse a previously created promise image if the context already has one.
  if (image_context->has_image())
    return;

  SkColorType color_type = ResourceFormatToClosestSkColorType(
      true /* gpu_compositing */, image_context->resource_format());
  GrBackendFormat backend_format = GetGrBackendFormatForTexture(
      image_context->resource_format(),
      image_context->mailbox_holder().texture_target,
      image_context->ycbcr_info());
  image_context->SetImage(
      current_paint_->recorder()->makePromiseTexture(
          backend_format, image_context->size().width(),
          image_context->size().height(), GrMipMapped::kNo,
          image_context->origin(), color_type, image_context->alpha_type(),
          image_context->color_space(), Fulfill /* fulfillProc */,
          DoNothing /* releaseProc */, DoNothing /* doneProc */,
          image_context /* context */),
      backend_format);

  if (image_context->mailbox_holder().sync_token.HasData()) {
    resource_sync_tokens_.push_back(image_context->mailbox_holder().sync_token);
    image_context->mutable_mailbox_holder()->sync_token.Clear();
  }
}
313
// Builds a single promise SkImage from 2-4 YUV(A) plane contexts. Plane
// count depends on alpha and interleaving (see the DCHECK). Each plane's
// sync token is collected, and each context is tracked for fulfillment.
sk_sp<SkImage> SkiaOutputSurfaceImpl::MakePromiseSkImageFromYUV(
    const std::vector<ImageContext*>& contexts,
    sk_sp<SkColorSpace> image_color_space,
    bool has_alpha) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(current_paint_);
  DCHECK((has_alpha && (contexts.size() == 3 || contexts.size() == 4)) ||
         (!has_alpha && (contexts.size() == 2 || contexts.size() == 3)));

  SkYUVAIndex indices[4];
  PrepareYUVATextureIndices(contexts, has_alpha, indices);

  GrBackendFormat formats[4] = {};
  SkISize yuva_sizes[4] = {};
  SkDeferredDisplayListRecorder::PromiseImageTextureContext
      texture_contexts[4] = {};
  for (size_t i = 0; i < contexts.size(); ++i) {
    auto* context = static_cast<ImageContextImpl*>(contexts[i]);
    DCHECK(context->origin() == kTopLeft_GrSurfaceOrigin);
    formats[i] = GetGrBackendFormatForTexture(
        context->resource_format(), context->mailbox_holder().texture_target,
        /*ycbcr_info=*/base::nullopt);
    yuva_sizes[i].set(context->size().width(), context->size().height());

    // NOTE: We don't have promises for individual planes, but still need format
    // for fallback
    context->SetImage(nullptr, formats[i]);

    if (context->mailbox_holder().sync_token.HasData()) {
      resource_sync_tokens_.push_back(context->mailbox_holder().sync_token);
      context->mutable_mailbox_holder()->sync_token.Clear();
    }
    images_in_current_paint_.push_back(context);
    texture_contexts[i] = context;
  }

  // Note: YUV to RGB conversion is handled by a color filter in SkiaRenderer.
  auto image = current_paint_->recorder()->makeYUVAPromiseTexture(
      kIdentity_SkYUVColorSpace, formats, yuva_sizes, indices,
      yuva_sizes[0].width(), yuva_sizes[0].height(), kTopLeft_GrSurfaceOrigin,
      image_color_space, Fulfill, DoNothing, DoNothing, texture_contexts);
  DCHECK(image);
  return image;
}
358
// Hands ownership of |image_contexts| to the GPU thread, where their
// GPU-side resources are released. No-op for an empty list.
void SkiaOutputSurfaceImpl::ReleaseImageContexts(
    std::vector<std::unique_ptr<ImageContext>> image_contexts) {
  if (image_contexts.empty())
    return;

  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback = base::BindOnce(
      &SkiaOutputSurfaceImplOnGpu::ReleaseImageContexts,
      base::Unretained(impl_on_gpu_.get()), std::move(image_contexts));
  gpu_task_scheduler_->ScheduleOrRetainGpuTask(std::move(callback), {});
}
371
// Factory for the concrete ImageContextImpl used by this surface's
// ExternalUseClient interface.
std::unique_ptr<ExternalUseClient::ImageContext>
SkiaOutputSurfaceImpl::CreateImageContext(
    const gpu::MailboxHolder& holder,
    const gfx::Size& size,
    ResourceFormat format,
    const base::Optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
    sk_sp<SkColorSpace> color_space) {
  return std::make_unique<ImageContextImpl>(holder, size, format, ycbcr_info,
                                            std::move(color_space));
}
382
// Schedules the swap on the GPU thread (together with the deferred root-pass
// draw closure), updates per-buffer damage tracking, and rebuilds the root
// recorder for the next frame.
void SkiaOutputSurfaceImpl::SwapBuffers(OutputSurfaceFrame frame) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(!current_paint_);
  // A non-empty sub_buffer_rect (or no rect at all) implies the buffer was
  // drawn to this frame, and vice versa.
  DCHECK_EQ(!frame.sub_buffer_rect || !frame.sub_buffer_rect->IsEmpty(),
            current_buffer_modified_);

  has_set_draw_rectangle_for_frame_ = false;

  // If current_buffer_modified_ is false, it means SkiaRenderer doesn't draw
  // anything for current frame. So this SwapBuffer() must be a empty swap, so
  // the previous buffer will be used for this frame.
  if (!damage_of_buffers_.empty() && current_buffer_modified_) {
    gfx::Rect damage_rect =
        frame.sub_buffer_rect ? *frame.sub_buffer_rect : gfx::Rect(size_);
    // Calculate damage area for every buffer.
    for (size_t i = 0u; i < damage_of_buffers_.size(); ++i) {
      if (i == current_buffer_) {
        // The buffer being presented is now fully up to date.
        damage_of_buffers_[i] = gfx::Rect();
      } else {
        damage_of_buffers_[i].Union(damage_rect);
      }
    }
    // change the current buffer index to the next buffer in the queue.
    if (++current_buffer_ == damage_of_buffers_.size())
      current_buffer_ = 0u;
  }
  current_buffer_modified_ = false;
  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback =
      base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SwapBuffers,
                     base::Unretained(impl_on_gpu_.get()), std::move(frame),
                     std::move(deferred_framebuffer_draw_closure_));
  ScheduleGpuTask(std::move(callback), std::move(resource_sync_tokens_));

  // Recreate |root_recorder_| after SwapBuffers has been scheduled on GPU
  // thread to save some time in BeginPaintCurrentFrame
  // TODO(vasilyt): reuse root recorder
  RecreateRootRecorder();
}
423
// Called when the display decides not to swap. If a root-pass draw was
// already recorded, it is still flushed to the GPU thread so resource frees
// and callbacks run.
void SkiaOutputSurfaceImpl::SwapBuffersSkipped() {
  if (deferred_framebuffer_draw_closure_) {
    // Run the task to draw the root RenderPass on the GPU thread. If we aren't
    // going to swap buffers and there are no CopyOutputRequests on the root
    // RenderPass we don't strictly need to draw. However, we still need to
    // PostTask to the GPU thread to deal with freeing resources and running
    // callbacks. This is infrequent and all the work is already done in
    // FinishPaintCurrentFrame() so use the same path.
    auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SwapBuffersSkipped,
                               base::Unretained(impl_on_gpu_.get()),
                               std::move(deferred_framebuffer_draw_closure_));
    ScheduleGpuTask(std::move(task), std::move(resource_sync_tokens_));

    // TODO(vasilyt): reuse root recorder
    RecreateRootRecorder();
  }
}
441
// Forwards the output-surface-as-overlay plane to the GPU thread.
void SkiaOutputSurfaceImpl::ScheduleOutputSurfaceAsOverlay(
    OverlayProcessorInterface::OutputSurfaceOverlayPlane output_surface_plane) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback = base::BindOnce(
      &SkiaOutputSurfaceImplOnGpu::ScheduleOutputSurfaceAsOverlay,
      base::Unretained(impl_on_gpu_.get()), std::move(output_surface_plane));
  ScheduleGpuTask(std::move(callback), {});
}
452
// Begins painting an offscreen render pass |id| and returns its canvas. The
// pass gets its own recorder via a non-root surface characterization.
SkCanvas* SkiaOutputSurfaceImpl::BeginPaintRenderPass(
    const RenderPassId& id,
    const gfx::Size& surface_size,
    ResourceFormat format,
    bool mipmap,
    sk_sp<SkColorSpace> color_space) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // Make sure there is no unsubmitted PaintFrame or PaintRenderPass.
  DCHECK(!current_paint_);
  DCHECK(resource_sync_tokens_.empty());

  SkSurfaceCharacterization c = CreateSkSurfaceCharacterization(
      surface_size, format, mipmap, std::move(color_space),
      false /* is_root_render_pass */);
  current_paint_.emplace(c, id);
  return current_paint_->recorder()->getCanvas();
}
470
// Ends the current paint. For an offscreen pass the detached DDL is
// scheduled on the GPU thread immediately; for the root pass the draw is
// packaged into |deferred_framebuffer_draw_closure_| and executed later by
// SwapBuffers/SwapBuffersSkipped/CopyOutput. Returns a verified sync token
// that is released once the GPU-side work finishes.
gpu::SyncToken SkiaOutputSurfaceImpl::SubmitPaint(
    base::OnceClosure on_finished) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(current_paint_);
  DCHECK(!deferred_framebuffer_draw_closure_);
  // If current_render_pass_id_ is not 0, we are painting a render pass.
  // Otherwise we are painting a frame.

  bool painting_render_pass = current_paint_->render_pass_id() != 0;

  gpu::SyncToken sync_token(
      gpu::CommandBufferNamespace::VIZ_SKIA_OUTPUT_SURFACE,
      impl_on_gpu_->command_buffer_id(), ++sync_fence_release_);
  sync_token.SetVerifyFlush();

  auto ddl = current_paint_->recorder()->detach();
  DCHECK(ddl);

  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  if (painting_render_pass) {
    auto it = render_pass_image_cache_.find(current_paint_->render_pass_id());
    if (it != render_pass_image_cache_.end()) {
      // We are going to overwrite the render pass, so we need reset the
      // image_context, so a new promise image will be created when the
      // MakePromiseSkImageFromRenderPass() is called.
      it->second->clear_image();
    }
    // |on_finished| is only supported for the root pass.
    DCHECK(!on_finished);
    auto closure = base::BindOnce(
        &SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass,
        base::Unretained(impl_on_gpu_.get()), current_paint_->render_pass_id(),
        std::move(ddl), std::move(images_in_current_paint_),
        resource_sync_tokens_, sync_fence_release_);
    ScheduleGpuTask(std::move(closure), std::move(resource_sync_tokens_));
  } else {
    // Draw on the root render pass.
    current_buffer_modified_ = true;
    std::unique_ptr<SkDeferredDisplayList> overdraw_ddl;
    if (renderer_settings_.show_overdraw_feedback) {
      overdraw_ddl = overdraw_surface_recorder_->detach();
      DCHECK(overdraw_ddl);
      overdraw_canvas_.reset();
      nway_canvas_.reset();
      overdraw_surface_recorder_.reset();
    }

    deferred_framebuffer_draw_closure_ = base::BindOnce(
        &SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame,
        base::Unretained(impl_on_gpu_.get()), std::move(ddl),
        std::move(overdraw_ddl), std::move(images_in_current_paint_),
        resource_sync_tokens_, sync_fence_release_, std::move(on_finished),
        draw_rectangle_);
    draw_rectangle_.reset();
  }
  images_in_current_paint_.clear();
  current_paint_.reset();
  return sync_token;
}
530
// Returns a promise SkImage backed by a previously painted render pass |id|,
// creating and caching the ImageContextImpl (and its promise image) on first
// use. The context is tracked for fulfillment in the current paint.
sk_sp<SkImage> SkiaOutputSurfaceImpl::MakePromiseSkImageFromRenderPass(
    const RenderPassId& id,
    const gfx::Size& size,
    ResourceFormat format,
    bool mipmap,
    sk_sp<SkColorSpace> color_space) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(current_paint_);

  auto& image_context = render_pass_image_cache_[id];
  if (!image_context) {
    image_context = std::make_unique<ImageContextImpl>(id, size, format, mipmap,
                                                       std::move(color_space));
  }
  if (!image_context->has_image()) {
    SkColorType color_type =
        ResourceFormatToClosestSkColorType(true /* gpu_compositing */, format);
    GrBackendFormat backend_format = GetGrBackendFormatForTexture(
        format, GL_TEXTURE_2D, /*ycbcr_info=*/base::nullopt);
    image_context->SetImage(
        current_paint_->recorder()->makePromiseTexture(
            backend_format, image_context->size().width(),
            image_context->size().height(), image_context->mipmap(),
            image_context->origin(), color_type, image_context->alpha_type(),
            image_context->color_space(), Fulfill, DoNothing, DoNothing,
            image_context.get()),
        backend_format);
    DCHECK(image_context->has_image());
  }
  images_in_current_paint_.push_back(image_context.get());
  return image_context->image();
}
563
// Removes cached render pass images for |ids| and transfers their contexts to
// the GPU thread for resource destruction.
void SkiaOutputSurfaceImpl::RemoveRenderPassResource(
    std::vector<RenderPassId> ids) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(!ids.empty());

  std::vector<std::unique_ptr<ImageContextImpl>> image_contexts;
  image_contexts.reserve(ids.size());
  for (const auto id : ids) {
    auto it = render_pass_image_cache_.find(id);
    // If the render pass was only used for a copy request, there won't be a
    // matching entry in |render_pass_image_cache_|.
    if (it != render_pass_image_cache_.end()) {
      it->second->clear_image();
      image_contexts.push_back(std::move(it->second));
      render_pass_image_cache_.erase(it);
    }
  }

  // impl_on_gpu_ is released on the GPU thread by a posted task from
  // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
  auto callback =
      base::BindOnce(&SkiaOutputSurfaceImplOnGpu::RemoveRenderPassResource,
                     base::Unretained(impl_on_gpu_.get()), std::move(ids),
                     std::move(image_contexts));
  ScheduleGpuTask(std::move(callback), {});
}
590
// Schedules a copy-output readback of render pass |id| on the GPU thread,
// consuming any pending root-pass draw closure so the content exists before
// the copy runs. Results are delivered on this thread's task runner unless
// the request already specifies one.
void SkiaOutputSurfaceImpl::CopyOutput(
    RenderPassId id,
    const copy_output::RenderPassGeometry& geometry,
    const gfx::ColorSpace& color_space,
    std::unique_ptr<CopyOutputRequest> request) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  if (!request->has_result_task_runner())
    request->set_result_task_runner(base::ThreadTaskRunnerHandle::Get());

  auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::CopyOutput,
                                 base::Unretained(impl_on_gpu_.get()), id,
                                 geometry, color_space, std::move(request),
                                 std::move(deferred_framebuffer_draw_closure_));
  ScheduleGpuTask(std::move(callback), std::move(resource_sync_tokens_));
}
606
// Forwards the overlay candidate list to the GPU thread; |sync_tokens| gate
// the scheduled task until the overlay resources are ready.
void SkiaOutputSurfaceImpl::ScheduleOverlays(
    OverlayList overlays,
    std::vector<gpu::SyncToken> sync_tokens) {
  auto task =
      base::BindOnce(&SkiaOutputSurfaceImplOnGpu::ScheduleOverlays,
                     base::Unretained(impl_on_gpu_.get()), std::move(overlays));
  ScheduleGpuTask(std::move(task), std::move(sync_tokens));
}
615
616 #if defined(OS_WIN)
// Windows-only: toggles DirectComposition layers on the GPU thread.
void SkiaOutputSurfaceImpl::SetEnableDCLayers(bool enable) {
  auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SetEnableDCLayers,
                             base::Unretained(impl_on_gpu_.get()), enable);
  ScheduleGpuTask(std::move(task), {});
}
622 #endif
623
// Returns the GPU-side memory tracker; valid only after Initialize().
gpu::MemoryTracker* SkiaOutputSurfaceImpl::GetMemoryTracker() {
  // Should only be called after initialization.
  DCHECK(impl_on_gpu_);
  return impl_on_gpu_->GetMemoryTracker();
}
629
// Test hook: overrides the surface origin locally and mirrors the updated
// capabilities to the GPU-side impl.
void SkiaOutputSurfaceImpl::SetCapabilitiesForTesting(
    gfx::SurfaceOrigin output_surface_origin) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(impl_on_gpu_);
  capabilities_.output_surface_origin = output_surface_origin;
  auto callback =
      base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting,
                     base::Unretained(impl_on_gpu_.get()), capabilities_);
  ScheduleGpuTask(std::move(callback), {});
}
640
// Two-phase initialization: sets up the GPU task scheduler and vsync callback
// plumbing on this thread, then blocks until InitializeOnGpuThread() has
// created |impl_on_gpu_| on the GPU thread. Returns false on failure.
bool SkiaOutputSurfaceImpl::Initialize() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Before starting to schedule GPU task, set up |gpu_task_scheduler_| that
  // holds a task sequence.
  gpu_task_scheduler_ = base::MakeRefCounted<gpu::GpuTaskSchedulerHelper>(
      dependency_->CreateSequence());

  weak_ptr_ = weak_ptr_factory_.GetWeakPtr();

  // This runner could be called from vsync or GPU thread after |this| is
  // destroyed. We post directly to display compositor thread to check
  // |weak_ptr_| as |dependency_| may have been destroyed.
  GpuVSyncCallback vsync_callback_runner =
#if defined(OS_ANDROID)
      // Callback is never used on Android. Doesn't work with WebView because
      // calling it bypasses SkiaOutputSurfaceDependency.
      base::DoNothing();
#else
      base::BindRepeating(
          [](scoped_refptr<base::SingleThreadTaskRunner> runner,
             base::WeakPtr<SkiaOutputSurfaceImpl> weak_ptr,
             base::TimeTicks timebase, base::TimeDelta interval) {
            runner->PostTask(FROM_HERE,
                             base::BindOnce(&SkiaOutputSurfaceImpl::OnGpuVSync,
                                            weak_ptr, timebase, interval));
          },
          base::ThreadTaskRunnerHandle::Get(), weak_ptr_);
#endif

  // Block until the GPU-thread half of initialization has run and reported
  // its result; |capabilities_| is valid afterwards.
  base::WaitableEvent event;
  bool result = false;
  auto callback = base::BindOnce(&SkiaOutputSurfaceImpl::InitializeOnGpuThread,
                                 base::Unretained(this), vsync_callback_runner,
                                 &event, &result);
  ScheduleGpuTask(std::move(callback), {});
  event.Wait();

  // When buffer content is preserved across swaps, track per-buffer damage
  // (one slot per pending frame plus the displayed one).
  if (capabilities_.preserve_buffer_content &&
      capabilities_.supports_post_sub_buffer) {
    capabilities_.only_invalidates_damage_rect = false;
    damage_of_buffers_.resize(capabilities_.max_frames_pending + 1);
  }

  return result;
}
687
// GPU-thread half of Initialize(): creates |impl_on_gpu_| and copies its
// capabilities back. Signals |event| on exit (via ScopedClosureRunner) so the
// waiting compositor thread resumes even on early failure paths.
void SkiaOutputSurfaceImpl::InitializeOnGpuThread(
    GpuVSyncCallback vsync_callback_runner,
    base::WaitableEvent* event,
    bool* result) {
  base::Optional<base::ScopedClosureRunner> scoped_runner;
  if (event) {
    scoped_runner.emplace(
        base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(event)));
  }

  // These callbacks are bound to |weak_ptr_| so they safely become no-ops
  // once |this| is destroyed.
  auto did_swap_buffer_complete_callback = base::BindRepeating(
      &SkiaOutputSurfaceImpl::DidSwapBuffersComplete, weak_ptr_);
  auto buffer_presented_callback =
      base::BindRepeating(&SkiaOutputSurfaceImpl::BufferPresented, weak_ptr_);
  auto context_lost_callback =
      base::BindOnce(&SkiaOutputSurfaceImpl::ContextLost, weak_ptr_);

  impl_on_gpu_ = SkiaOutputSurfaceImplOnGpu::Create(
      dependency_.get(), renderer_settings_,
      gpu_task_scheduler_->GetSequenceId(),
      std::move(did_swap_buffer_complete_callback),
      std::move(buffer_presented_callback), std::move(context_lost_callback),
      std::move(vsync_callback_runner));
  if (!impl_on_gpu_) {
    *result = false;
  } else {
    capabilities_ = impl_on_gpu_->capabilities();
    is_displayed_as_overlay_ = impl_on_gpu_->IsDisplayedAsOverlay();
    *result = true;
  }
}
719
// Builds an SkSurfaceCharacterization matching the surface the GPU thread
// will draw into, so that deferred recording on this thread agrees with the
// real surface. |is_root_render_pass| selects the on-screen surface
// configuration (origin, default-framebuffer usage, HDR format) instead of
// the offscreen render-pass configuration.
SkSurfaceCharacterization
SkiaOutputSurfaceImpl::CreateSkSurfaceCharacterization(
    const gfx::Size& surface_size,
    ResourceFormat format,
    bool mipmap,
    sk_sp<SkColorSpace> color_space,
    bool is_root_render_pass) {
  auto gr_context_thread_safe = impl_on_gpu_->GetGrContextThreadSafeProxy();
  auto cache_max_resource_bytes = impl_on_gpu_->max_resource_cache_bytes();
  // LegacyFontHost will get LCD text and skia figures out what type to use.
  SkSurfaceProps surface_props(0 /*flags */,
                               SkSurfaceProps::kLegacyFontHost_InitType);
  if (is_root_render_pass) {
    // Prefer the HDR color type / backend format when HDR content is active
    // and the capabilities advertise a valid HDR configuration.
    auto color_type =
        is_hdr_ && capabilities_.sk_color_type_for_hdr != kUnknown_SkColorType
            ? capabilities_.sk_color_type_for_hdr
            : capabilities_.sk_color_type;

    const auto& backend_format =
        is_hdr_ && capabilities_.gr_backend_format_for_hdr.isValid()
            ? capabilities_.gr_backend_format_for_hdr
            : capabilities_.gr_backend_format;
    auto surface_origin =
        capabilities_.output_surface_origin == gfx::SurfaceOrigin::kBottomLeft
            ? kBottomLeft_GrSurfaceOrigin
            : kTopLeft_GrSurfaceOrigin;
    auto image_info = SkImageInfo::Make(
        surface_size.width(), surface_size.height(), color_type,
        kPremul_SkAlphaType, std::move(color_space));
    // The default GL framebuffer (FBO0) only makes sense for a GL context.
    DCHECK((capabilities_.uses_default_gl_framebuffer &&
            dependency_->gr_context_type() == gpu::GrContextType::kGL) ||
           !capabilities_.uses_default_gl_framebuffer);
    // Root surface: non-textureable; may target FBO0 per capabilities.
    auto characterization = gr_context_thread_safe->createCharacterization(
        cache_max_resource_bytes, image_info, backend_format,
        0 /* sampleCount */, surface_origin, surface_props, mipmap,
        capabilities_.uses_default_gl_framebuffer, false /* isTextureable */,
        impl_on_gpu_->GetGpuPreferences().enforce_vulkan_protected_memory
            ? GrProtected::kYes
            : GrProtected::kNo);
    DCHECK(characterization.isValid());
    return characterization;
  }

  // Offscreen render pass: derive the color type from |format| and use
  // Skia's default renderable backend format for that type.
  auto color_type =
      ResourceFormatToClosestSkColorType(true /* gpu_compositing */, format);
  auto backend_format = gr_context_thread_safe->defaultBackendFormat(
      color_type, GrRenderable::kYes);
  DCHECK(backend_format.isValid());
  auto image_info =
      SkImageInfo::Make(surface_size.width(), surface_size.height(), color_type,
                        kPremul_SkAlphaType, std::move(color_space));

  // Render passes are sampled later, so the surface must be textureable and
  // never targets FBO0.
  auto characterization = gr_context_thread_safe->createCharacterization(
      cache_max_resource_bytes, image_info, backend_format, 0 /* sampleCount */,
      kTopLeft_GrSurfaceOrigin, surface_props, mipmap,
      false /* willUseGLFBO0 */, true /* isTextureable */,
      impl_on_gpu_->GetGpuPreferences().enforce_vulkan_protected_memory
          ? GrProtected::kYes
          : GrProtected::kNo);
  DCHECK(characterization.isValid());
  return characterization;
}
782
DidSwapBuffersComplete(gpu::SwapBuffersCompleteParams params,const gfx::Size & pixel_size)783 void SkiaOutputSurfaceImpl::DidSwapBuffersComplete(
784 gpu::SwapBuffersCompleteParams params,
785 const gfx::Size& pixel_size) {
786 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
787 DCHECK(client_);
788
789 if (!params.texture_in_use_responses.empty())
790 client_->DidReceiveTextureInUseResponses(params.texture_in_use_responses);
791 if (!params.ca_layer_params.is_empty)
792 client_->DidReceiveCALayerParams(params.ca_layer_params);
793 client_->DidReceiveSwapBuffersAck(params.swap_response.timings);
794 if (needs_swap_size_notifications_)
795 client_->DidSwapWithSize(pixel_size);
796 }
797
BufferPresented(const gfx::PresentationFeedback & feedback)798 void SkiaOutputSurfaceImpl::BufferPresented(
799 const gfx::PresentationFeedback& feedback) {
800 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
801 DCHECK(client_);
802 client_->DidReceivePresentationFeedback(feedback);
803 if (update_vsync_parameters_callback_ &&
804 feedback.flags & gfx::PresentationFeedback::kVSync) {
805 // TODO(brianderson): We should not be receiving 0 intervals.
806 update_vsync_parameters_callback_.Run(
807 feedback.timestamp, feedback.interval.is_zero()
808 ? BeginFrameArgs::DefaultInterval()
809 : feedback.interval);
810 }
811 }
812
OnGpuVSync(base::TimeTicks timebase,base::TimeDelta interval)813 void SkiaOutputSurfaceImpl::OnGpuVSync(base::TimeTicks timebase,
814 base::TimeDelta interval) {
815 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
816 if (gpu_vsync_callback_)
817 gpu_vsync_callback_.Run(timebase, interval);
818 }
819
ScheduleGpuTaskForTesting(base::OnceClosure callback,std::vector<gpu::SyncToken> sync_tokens)820 void SkiaOutputSurfaceImpl::ScheduleGpuTaskForTesting(
821 base::OnceClosure callback,
822 std::vector<gpu::SyncToken> sync_tokens) {
823 ScheduleGpuTask(std::move(callback), std::move(sync_tokens));
824 }
825
ScheduleGpuTask(base::OnceClosure callback,std::vector<gpu::SyncToken> sync_tokens)826 void SkiaOutputSurfaceImpl::ScheduleGpuTask(
827 base::OnceClosure callback,
828 std::vector<gpu::SyncToken> sync_tokens) {
829 auto wrapped_closure = base::BindOnce(
830 [](base::OnceClosure callback) {
831 gpu::ContextUrl::SetActiveUrl(GetActiveUrl());
832 std::move(callback).Run();
833 },
834 std::move(callback));
835 gpu_task_scheduler_->ScheduleGpuTask(std::move(wrapped_closure),
836 std::move(sync_tokens));
837 }
838
// Maps |resource_format| to a GrBackendFormat for whichever Skia backend
// (Vulkan, Dawn, or GL) this output surface is running on.
GrBackendFormat SkiaOutputSurfaceImpl::GetGrBackendFormatForTexture(
    ResourceFormat resource_format,
    uint32_t gl_texture_target,
    const base::Optional<gpu::VulkanYCbCrInfo>& ycbcr_info) {
  if (dependency_->IsUsingVulkan()) {
#if BUILDFLAG(ENABLE_VULKAN)
    if (!ycbcr_info) {
      // YCbCr info is required for YUV images.
      DCHECK(resource_format != YVU_420 && resource_format != YUV_420_BIPLANAR);
      return GrBackendFormat::MakeVk(ToVkFormat(resource_format));
    }

    // Assume optimal tiling.
    GrVkYcbcrConversionInfo gr_ycbcr_info =
        CreateGrVkYcbcrConversionInfo(dependency_->GetVulkanContextProvider()
                                          ->GetDeviceQueue()
                                          ->GetVulkanPhysicalDevice(),
                                      VK_IMAGE_TILING_OPTIMAL, ycbcr_info);
    return GrBackendFormat::MakeVk(gr_ycbcr_info);
#endif
  } else if (dependency_->IsUsingDawn()) {
#if BUILDFLAG(SKIA_USE_DAWN)
    wgpu::TextureFormat format = ToDawnFormat(resource_format);
    return GrBackendFormat::MakeDawn(format);
#endif
  } else {
    // GL path: YCbCr sampling info is only meaningful for Vulkan.
    DCHECK(!ycbcr_info);
    // Convert internal format from GLES2 to platform GL.
    unsigned int texture_storage_format = gpu::GetGrGLBackendTextureFormat(
        impl_on_gpu_->GetFeatureInfo(), resource_format);

    return GrBackendFormat::MakeGL(texture_storage_format, gl_texture_target);
  }
  // Reached only when the selected backend was compiled out (the matching
  // BUILDFLAG above was disabled).
  NOTREACHED();
  return GrBackendFormat();
}
875
GetFramebufferCopyTextureFormat()876 uint32_t SkiaOutputSurfaceImpl::GetFramebufferCopyTextureFormat() {
877 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
878
879 return GL_RGB;
880 }
881
IsDisplayedAsOverlayPlane() const882 bool SkiaOutputSurfaceImpl::IsDisplayedAsOverlayPlane() const {
883 return is_displayed_as_overlay_;
884 }
885
GetOverlayTextureId() const886 unsigned SkiaOutputSurfaceImpl::GetOverlayTextureId() const {
887 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
888 return 0;
889 }
890
HasExternalStencilTest() const891 bool SkiaOutputSurfaceImpl::HasExternalStencilTest() const {
892 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
893
894 return false;
895 }
896
ApplyExternalStencil()897 void SkiaOutputSurfaceImpl::ApplyExternalStencil() {
898 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
899 }
900
UpdateGpuFence()901 unsigned SkiaOutputSurfaceImpl::UpdateGpuFence() {
902 return 0;
903 }
904
SetNeedsSwapSizeNotifications(bool needs_swap_size_notifications)905 void SkiaOutputSurfaceImpl::SetNeedsSwapSizeNotifications(
906 bool needs_swap_size_notifications) {
907 needs_swap_size_notifications_ = needs_swap_size_notifications;
908 }
909
GetCacheBackBufferCb()910 base::ScopedClosureRunner SkiaOutputSurfaceImpl::GetCacheBackBufferCb() {
911 if (!impl_on_gpu_->gl_surface())
912 return base::ScopedClosureRunner();
913 return dependency_->CacheGLSurface(impl_on_gpu_->gl_surface());
914 }
915
AddContextLostObserver(ContextLostObserver * observer)916 void SkiaOutputSurfaceImpl::AddContextLostObserver(
917 ContextLostObserver* observer) {
918 observers_.AddObserver(observer);
919 }
920
RemoveContextLostObserver(ContextLostObserver * observer)921 void SkiaOutputSurfaceImpl::RemoveContextLostObserver(
922 ContextLostObserver* observer) {
923 observers_.RemoveObserver(observer);
924 }
925
PrepareYUVATextureIndices(const std::vector<ImageContext * > & contexts,bool has_alpha,SkYUVAIndex indices[4])926 void SkiaOutputSurfaceImpl::PrepareYUVATextureIndices(
927 const std::vector<ImageContext*>& contexts,
928 bool has_alpha,
929 SkYUVAIndex indices[4]) {
930 DCHECK((has_alpha && (contexts.size() == 3 || contexts.size() == 4)) ||
931 (!has_alpha && (contexts.size() == 2 || contexts.size() == 3)));
932
933 bool uv_interleaved = has_alpha ? contexts.size() == 3 : contexts.size() == 2;
934
935 indices[SkYUVAIndex::kY_Index].fIndex = 0;
936 indices[SkYUVAIndex::kY_Index].fChannel = SkColorChannel::kR;
937
938 if (uv_interleaved) {
939 indices[SkYUVAIndex::kU_Index].fIndex = 1;
940 indices[SkYUVAIndex::kU_Index].fChannel = SkColorChannel::kR;
941
942 indices[SkYUVAIndex::kV_Index].fIndex = 1;
943 indices[SkYUVAIndex::kV_Index].fChannel = SkColorChannel::kG;
944
945 indices[SkYUVAIndex::kA_Index].fIndex = has_alpha ? 2 : -1;
946 indices[SkYUVAIndex::kA_Index].fChannel = SkColorChannel::kR;
947 } else {
948 indices[SkYUVAIndex::kU_Index].fIndex = 1;
949 indices[SkYUVAIndex::kU_Index].fChannel = SkColorChannel::kR;
950
951 indices[SkYUVAIndex::kV_Index].fIndex = 2;
952 indices[SkYUVAIndex::kV_Index].fChannel = SkColorChannel::kR;
953
954 indices[SkYUVAIndex::kA_Index].fIndex = has_alpha ? 3 : -1;
955 indices[SkYUVAIndex::kA_Index].fChannel = SkColorChannel::kR;
956 }
957 }
958
ContextLost()959 void SkiaOutputSurfaceImpl::ContextLost() {
960 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
961 for (auto& observer : observers_)
962 observer.OnContextLost();
963 }
964
965 scoped_refptr<gpu::GpuTaskSchedulerHelper>
GetGpuTaskSchedulerHelper()966 SkiaOutputSurfaceImpl::GetGpuTaskSchedulerHelper() {
967 return gpu_task_scheduler_;
968 }
969
GetCurrentFramebufferDamage() const970 gfx::Rect SkiaOutputSurfaceImpl::GetCurrentFramebufferDamage() const {
971 if (damage_of_buffers_.empty())
972 return gfx::Rect();
973
974 DCHECK_LT(current_buffer_, damage_of_buffers_.size());
975 return damage_of_buffers_[current_buffer_];
976 }
977
978 } // namespace viz
979