1 // Copyright (c) 2017 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/raster_decoder.h"
6
7 #include <stdint.h>
8
9 #include <algorithm>
10 #include <memory>
11 #include <string>
12 #include <utility>
13 #include <vector>
14
15 #include "base/atomic_sequence_num.h"
16 #include "base/bind.h"
17 #include "base/bits.h"
18 #include "base/containers/flat_map.h"
19 #include "base/debug/crash_logging.h"
20 #include "base/logging.h"
21 #include "base/memory/ref_counted.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/stl_util.h"
24 #include "base/strings/stringprintf.h"
25 #include "base/trace_event/trace_event.h"
26 #include "build/build_config.h"
27 #include "cc/paint/paint_cache.h"
28 #include "cc/paint/paint_op_buffer.h"
29 #include "cc/paint/transfer_cache_entry.h"
30 #include "components/viz/common/resources/resource_format_utils.h"
31 #include "gpu/command_buffer/common/capabilities.h"
32 #include "gpu/command_buffer/common/command_buffer_id.h"
33 #include "gpu/command_buffer/common/constants.h"
34 #include "gpu/command_buffer/common/context_result.h"
35 #include "gpu/command_buffer/common/debug_marker_manager.h"
36 #include "gpu/command_buffer/common/mailbox.h"
37 #include "gpu/command_buffer/common/raster_cmd_format.h"
38 #include "gpu/command_buffer/common/raster_cmd_ids.h"
39 #include "gpu/command_buffer/common/sync_token.h"
40 #include "gpu/command_buffer/service/command_buffer_service.h"
41 #include "gpu/command_buffer/service/context_state.h"
42 #include "gpu/command_buffer/service/decoder_client.h"
43 #include "gpu/command_buffer/service/error_state.h"
44 #include "gpu/command_buffer/service/feature_info.h"
45 #include "gpu/command_buffer/service/gl_utils.h"
46 #include "gpu/command_buffer/service/gles2_cmd_copy_tex_image.h"
47 #include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
48 #include "gpu/command_buffer/service/gpu_tracer.h"
49 #include "gpu/command_buffer/service/logger.h"
50 #include "gpu/command_buffer/service/mailbox_manager.h"
51 #include "gpu/command_buffer/service/query_manager.h"
52 #include "gpu/command_buffer/service/raster_cmd_validation.h"
53 #include "gpu/command_buffer/service/service_font_manager.h"
54 #include "gpu/command_buffer/service/service_transfer_cache.h"
55 #include "gpu/command_buffer/service/service_utils.h"
56 #include "gpu/command_buffer/service/shared_context_state.h"
57 #include "gpu/command_buffer/service/shared_image_factory.h"
58 #include "gpu/command_buffer/service/shared_image_representation.h"
59 #include "gpu/command_buffer/service/skia_utils.h"
60 #include "gpu/command_buffer/service/wrapped_sk_image.h"
61 #include "gpu/vulkan/buildflags.h"
62 #include "skia/ext/legacy_display_globals.h"
63 #include "third_party/skia/include/core/SkCanvas.h"
64 #include "third_party/skia/include/core/SkDeferredDisplayListRecorder.h"
65 #include "third_party/skia/include/core/SkPromiseImageTexture.h"
66 #include "third_party/skia/include/core/SkSurface.h"
67 #include "third_party/skia/include/core/SkSurfaceProps.h"
68 #include "third_party/skia/include/core/SkTypeface.h"
69 #include "third_party/skia/include/core/SkYUVAInfo.h"
70 #include "third_party/skia/include/gpu/GrBackendSemaphore.h"
71 #include "third_party/skia/include/gpu/GrBackendSurface.h"
72 #include "third_party/skia/include/gpu/GrDirectContext.h"
73 #include "third_party/skia/include/gpu/GrYUVABackendTextures.h"
74 #include "ui/gfx/buffer_format_util.h"
75 #include "ui/gfx/skia_util.h"
76 #include "ui/gl/gl_context.h"
77 #include "ui/gl/gl_gl_api_implementation.h"
78 #include "ui/gl/gl_surface.h"
79 #include "ui/gl/gl_version_info.h"
80
81 #if BUILDFLAG(ENABLE_VULKAN)
82 #include "components/viz/common/gpu/vulkan_context_provider.h"
83 #include "gpu/vulkan/vulkan_device_queue.h"
84 #endif
85
// Local versions of the SET_GL_ERROR macros. They all route through this
// decoder's |error_state_| member so that errors are attributed to the
// raster decoder that raised them.
#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
  ERRORSTATE_SET_GL_ERROR(error_state_.get(), error, function_name, msg)
#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label)      \
  ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_.get(), function_name, \
                                       static_cast<uint32_t>(value), label)
#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
  ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_.get(), function_name)
#define LOCAL_PEEK_GL_ERROR(function_name) \
  ERRORSTATE_PEEK_GL_ERROR(error_state_.get(), function_name)
#define LOCAL_CLEAR_REAL_GL_ERRORS(function_name) \
  ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_.get(), function_name)
// Warning macros capture the call site via __FILE__/__LINE__.
#define LOCAL_PERFORMANCE_WARNING(msg) \
  PerformanceWarning(__FILE__, __LINE__, msg)
#define LOCAL_RENDER_WARNING(msg) RenderWarning(__FILE__, __LINE__, msg)
101
102 namespace gpu {
103 namespace raster {
104
105 namespace {
106
// Process-wide monotonic counter used to hand out decoder ids (see
// |raster_decoder_id_| below).
base::AtomicSequenceNumber g_raster_decoder_id;
108
109 // This class prevents any GL errors that occur when it is in scope from
110 // being reported to the client.
111 class ScopedGLErrorSuppressor {
112 public:
ScopedGLErrorSuppressor(const char * function_name,gles2::ErrorState * error_state)113 ScopedGLErrorSuppressor(const char* function_name,
114 gles2::ErrorState* error_state)
115 : function_name_(function_name), error_state_(error_state) {
116 ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_, function_name_);
117 }
~ScopedGLErrorSuppressor()118 ~ScopedGLErrorSuppressor() {
119 ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_, function_name_);
120 }
121
122 private:
123 const char* function_name_;
124 gles2::ErrorState* error_state_;
125 DISALLOW_COPY_AND_ASSIGN(ScopedGLErrorSuppressor);
126 };
127
128 // Temporarily changes a decoder's bound texture and restore it when this
129 // object goes out of scope. Also temporarily switches to using active texture
130 // unit zero in case the client has changed that to something invalid.
131 class ScopedTextureBinder {
132 public:
ScopedTextureBinder(gles2::ContextState * state,GLenum target,GLuint texture,GrDirectContext * gr_context)133 ScopedTextureBinder(gles2::ContextState* state,
134 GLenum target,
135 GLuint texture,
136 GrDirectContext* gr_context)
137 : state_(state), target_(target) {
138 auto* api = state->api();
139 api->glActiveTextureFn(GL_TEXTURE0);
140 api->glBindTextureFn(target_, texture);
141 if (gr_context)
142 gr_context->resetContext(kTextureBinding_GrGLBackendState);
143 }
144
~ScopedTextureBinder()145 ~ScopedTextureBinder() { state_->api()->glBindTextureFn(target_, 0); }
146
147 private:
148 gles2::ContextState* state_;
149 GLenum target_;
150
151 DISALLOW_COPY_AND_ASSIGN(ScopedTextureBinder);
152 };
153
154 // Temporarily changes a decoder's PIXEL_UNPACK_BUFFER to 0 and set pixel
155 // unpack params to default, and restore them when this object goes out of
156 // scope.
157 class ScopedPixelUnpackState {
158 public:
ScopedPixelUnpackState(gles2::ContextState * state,GrDirectContext * gr_context,const gles2::FeatureInfo * feature_info)159 explicit ScopedPixelUnpackState(gles2::ContextState* state,
160 GrDirectContext* gr_context,
161 const gles2::FeatureInfo* feature_info) {
162 DCHECK(state);
163 auto* api = state->api();
164 api->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, 4);
165 if (feature_info->gl_version_info().is_es3 ||
166 feature_info->gl_version_info().is_desktop_core_profile ||
167 feature_info->feature_flags().ext_pixel_buffer_object)
168 api->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
169
170 if (feature_info->gl_version_info().is_es3 ||
171 feature_info->gl_version_info().is_desktop_core_profile ||
172 feature_info->feature_flags().ext_unpack_subimage)
173 api->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
174 if (gr_context) {
175 gr_context->resetContext(kMisc_GrGLBackendState |
176 kPixelStore_GrGLBackendState);
177 }
178 }
179 ~ScopedPixelUnpackState() = default;
180
181 private:
182 DISALLOW_COPY_AND_ASSIGN(ScopedPixelUnpackState);
183 };
184
185 // Commands that are explicitly listed as OK to occur between
186 // BeginRasterCHROMIUM and EndRasterCHROMIUM. They do not invalidate
187 // GrDirectContext state tracking.
AllowedBetweenBeginEndRaster(CommandId command)188 bool AllowedBetweenBeginEndRaster(CommandId command) {
189 switch (command) {
190 case kCreateTransferCacheEntryINTERNAL:
191 case kDeleteTransferCacheEntryINTERNAL:
192 case kEndRasterCHROMIUM:
193 case kFinish:
194 case kFlush:
195 case kGetError:
196 case kRasterCHROMIUM:
197 case kUnlockTransferCacheEntryINTERNAL:
198 return true;
199 default:
200 return false;
201 }
202 }
203
// This class is sent to cc::PaintOpReader during paint op deserialization. When
// a cc:PaintOp refers to a mailbox-backed cc:PaintImage, this class opens the
// shared image for read access and returns an SkImage reference.
// SharedImageProviderImpl maintains read access until it is destroyed
// which should occur after |end_semaphores| have been flushed to Skia.
class SharedImageProviderImpl final : public cc::SharedImageProvider {
 public:
  // All raw pointers are unowned and, being stored for later use, must
  // outlive this provider. |end_semaphores| accumulates semaphores that the
  // caller is responsible for flushing to Skia before destroying us.
  SharedImageProviderImpl(
      SharedImageRepresentationFactory* shared_image_factory,
      scoped_refptr<SharedContextState> shared_context_state,
      SkSurface* output_surface,
      std::vector<GrBackendSemaphore>* end_semaphores,
      gles2::ErrorState* error_state)
      : shared_image_factory_(shared_image_factory),
        shared_context_state_(std::move(shared_context_state)),
        output_surface_(output_surface),
        end_semaphores_(end_semaphores),
        error_state_(error_state) {
    DCHECK(shared_image_factory_);
    DCHECK(shared_context_state_);
    DCHECK(output_surface_);
    DCHECK(end_semaphores_);
    DCHECK(error_state_);
  }
  SharedImageProviderImpl(const SharedImageProviderImpl&) = delete;
  SharedImageProviderImpl& operator=(const SharedImageProviderImpl&) = delete;

  // Clearing |read_accessors_| ends every scoped read access opened by
  // OpenSharedImageForRead().
  ~SharedImageProviderImpl() override { read_accessors_.clear(); }

  // Returns an SkImage backed by the shared image named by |mailbox|, opening
  // (and caching) a scoped read access on first use. On failure raises
  // GL_INVALID_OPERATION and returns nullptr.
  sk_sp<SkImage> OpenSharedImageForRead(const gpu::Mailbox& mailbox) override {
    // Fast path: reuse an access already opened for this mailbox.
    auto it = read_accessors_.find(mailbox);
    if (it != read_accessors_.end())
      return it->second.read_access_sk_image;

    auto shared_image_skia =
        shared_image_factory_->ProduceSkia(mailbox, shared_context_state_);
    if (!shared_image_skia) {
      ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_OPERATION,
                              "SharedImageProviderImpl::OpenSharedImageForRead",
                              ("Attempting to operate on unknown mailbox:" +
                               mailbox.ToDebugString())
                                  .c_str());
      return nullptr;
    }

    std::vector<GrBackendSemaphore> begin_semaphores;
    // |end_semaphores_| is owned by RasterDecoderImpl which will handle sending
    // them to SkCanvas
    auto scoped_read_access = shared_image_skia->BeginScopedReadAccess(
        &begin_semaphores, end_semaphores_);
    if (!scoped_read_access) {
      ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_OPERATION,
                              "SharedImageProviderImpl::OpenSharedImageForRead",
                              ("Couldn't access shared image for mailbox:" +
                               mailbox.ToDebugString())
                                  .c_str());
      return nullptr;
    }

    // The output surface must wait on any semaphores that guard the source
    // image before it may be sampled.
    if (!begin_semaphores.empty()) {
      bool result = output_surface_->wait(begin_semaphores.size(),
                                          begin_semaphores.data(),
                                          /*deleteSemaphoresAfterWait=*/false);
      DCHECK(result);
    }

    auto sk_image =
        scoped_read_access->CreateSkImage(shared_context_state_->gr_context());
    if (!sk_image) {
      ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_OPERATION,
                              "SharedImageProviderImpl::OpenSharedImageForRead",
                              "Couldn't create output SkImage.");
      return nullptr;
    }

    // Keep the representation and read access alive for our lifetime so the
    // returned SkImage remains valid.
    read_accessors_[mailbox] = {std::move(shared_image_skia),
                                std::move(scoped_read_access), sk_image};
    return sk_image;
  }

 private:
  SharedImageRepresentationFactory* shared_image_factory_;
  scoped_refptr<SharedContextState> shared_context_state_;
  SkSurface* output_surface_;
  std::vector<GrBackendSemaphore>* end_semaphores_;
  gles2::ErrorState* error_state_;

  // Everything required to keep one opened shared image readable: the Skia
  // representation, the scoped read access, and the SkImage handed out.
  struct SharedImageReadAccess {
    std::unique_ptr<SharedImageRepresentationSkia> shared_image_skia;
    std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
        scoped_read_access;
    sk_sp<SkImage> read_access_sk_image;
  };
  base::flat_map<gpu::Mailbox, SharedImageReadAccess> read_accessors_;
};
299
300 } // namespace
301
302 // RasterDecoderImpl uses two separate state trackers (gpu::gles2::ContextState
303 // and GrDirectContext) that cache the current GL driver state. Each class sees
304 // a fraction of the GL calls issued and can easily become inconsistent with GL
305 // state. We guard against that by resetting. But resetting is expensive, so we
306 // avoid it as much as possible.
class RasterDecoderImpl final : public RasterDecoder,
                                public gles2::ErrorStateClient,
                                public ServiceFontManager::Client,
                                public SharedContextState::ContextLostObserver {
 public:
  RasterDecoderImpl(DecoderClient* client,
                    CommandBufferServiceBase* command_buffer_service,
                    gles2::Outputter* outputter,
                    const GpuFeatureInfo& gpu_feature_info,
                    const GpuPreferences& gpu_preferences,
                    MemoryTracker* memory_tracker,
                    SharedImageManager* shared_image_manager,
                    scoped_refptr<SharedContextState> shared_context_state,
                    bool is_privileged);
  ~RasterDecoderImpl() override;

  gles2::GLES2Util* GetGLES2Util() override { return &util_; }

  // DecoderContext implementation.
  base::WeakPtr<DecoderContext> AsWeakPtr() override;
  ContextResult Initialize(
      const scoped_refptr<gl::GLSurface>& surface,
      const scoped_refptr<gl::GLContext>& context,
      bool offscreen,
      const gles2::DisallowedFeatures& disallowed_features,
      const ContextCreationAttribs& attrib_helper) override;
  void Destroy(bool have_context) override;
  bool MakeCurrent() override;
  gl::GLContext* GetGLContext() override;
  gl::GLSurface* GetGLSurface() override;
  const gles2::FeatureInfo* GetFeatureInfo() const override {
    return feature_info();
  }
  Capabilities GetCapabilities() override;
  const gles2::ContextState* GetContextState() override;

  // TODO(penghuang): Remove unused context state related methods.
  void RestoreGlobalState() const override;
  void ClearAllAttributes() const override;
  void RestoreAllAttributes() const override;
  void RestoreState(const gles2::ContextState* prev_state) override;
  void RestoreActiveTexture() const override;
  void RestoreAllTextureUnitAndSamplerBindings(
      const gles2::ContextState* prev_state) const override;
  void RestoreActiveTextureUnitBinding(unsigned int target) const override;
  void RestoreBufferBinding(unsigned int target) override;
  void RestoreBufferBindings() const override;
  void RestoreFramebufferBindings() const override;
  void RestoreRenderbufferBindings() override;
  void RestoreProgramBindings() const override;
  void RestoreTextureState(unsigned service_id) override;
  void RestoreTextureUnitBindings(unsigned unit) const override;
  void RestoreVertexAttribArray(unsigned index) override;
  void RestoreAllExternalTextureBindingsIfNeeded() override;
  QueryManager* GetQueryManager() override;

  void SetQueryCallback(unsigned int query_client_id,
                        base::OnceClosure callback) override;
  gles2::GpuFenceManager* GetGpuFenceManager() override;
  bool HasPendingQueries() const override;
  void ProcessPendingQueries(bool did_finish) override;
  bool HasMoreIdleWork() const override;
  void PerformIdleWork() override;
  bool HasPollingWork() const override;
  void PerformPollingWork() override;
  TextureBase* GetTextureBase(uint32_t client_id) override;
  void SetLevelInfo(uint32_t client_id,
                    int level,
                    unsigned internal_format,
                    unsigned width,
                    unsigned height,
                    unsigned depth,
                    unsigned format,
                    unsigned type,
                    const gfx::Rect& cleared_rect) override;
  bool WasContextLost() const override;
  bool WasContextLostByRobustnessExtension() const override;
  void MarkContextLost(error::ContextLostReason reason) override;
  bool CheckResetStatus() override;
  void BeginDecoding() override;
  void EndDecoding() override;
  const char* GetCommandName(unsigned int command_id) const;
  error::Error DoCommands(unsigned int num_commands,
                          const volatile void* buffer,
                          int num_entries,
                          int* entries_processed) override;
  base::StringPiece GetLogPrefix() override;
  void BindImage(uint32_t client_texture_id,
                 uint32_t texture_target,
                 gl::GLImage* image,
                 bool can_bind_to_sampler) override;
  gles2::ContextGroup* GetContextGroup() override;
  gles2::ErrorState* GetErrorState() override;
  std::unique_ptr<gles2::AbstractTexture> CreateAbstractTexture(
      GLenum target,
      GLenum internal_format,
      GLsizei width,
      GLsizei height,
      GLsizei depth,
      GLint border,
      GLenum format,
      GLenum type) override;
  bool IsCompressedTextureFormat(unsigned format) override;
  bool ClearLevel(gles2::Texture* texture,
                  unsigned target,
                  int level,
                  unsigned format,
                  unsigned type,
                  int xoffset,
                  int yoffset,
                  int width,
                  int height) override;
  bool ClearCompressedTextureLevel(gles2::Texture* texture,
                                   unsigned target,
                                   int level,
                                   unsigned format,
                                   int width,
                                   int height) override;
  bool ClearCompressedTextureLevel3D(gles2::Texture* texture,
                                     unsigned target,
                                     int level,
                                     unsigned format,
                                     int width,
                                     int height,
                                     int depth) override;
  // 3D texture clearing is not supported by the raster decoder.
  bool ClearLevel3D(gles2::Texture* texture,
                    unsigned target,
                    int level,
                    unsigned format,
                    unsigned type,
                    int width,
                    int height,
                    int depth) override {
    NOTIMPLEMENTED();
    return false;
  }
  int GetRasterDecoderId() const override;
  int DecoderIdForTest() override;
  ServiceTransferCache* GetTransferCacheForTest() override;
  void SetUpForRasterCHROMIUMForTest() override;
  void SetOOMErrorForTest() override;
  void DisableFlushWorkaroundForTest() override;

  // ErrorClientState implementation.
  void OnContextLostError() override;
  void OnOutOfMemoryError() override;

  gles2::Logger* GetLogger() override;

  void SetIgnoreCachedStateForTest(bool ignore) override;
  gles2::ImageManager* GetImageManagerForTest() override;

  void SetCopyTextureResourceManagerForTest(
      gles2::CopyTextureCHROMIUMResourceManager* copy_texture_resource_manager)
      override;

  // ServiceFontManager::Client implementation.
  scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override;
  void ReportProgress() override;

  // SharedContextState::ContextLostObserver implementation.
  void OnContextLost() override;

 private:
  // Accessors that forward to the SharedContextState shared with other
  // decoders. ContextState only exists for the validating decoder; in
  // passthrough mode this must not be called (hence the NOTREACHED).
  gles2::ContextState* state() const {
    if (use_passthrough_) {
      NOTREACHED();
      return nullptr;
    }
    return shared_context_state_->context_state();
  }
  gl::GLApi* api() const { return api_; }
  GrDirectContext* gr_context() const {
    return shared_context_state_->gr_context();
  }
  ServiceTransferCache* transfer_cache() {
    return shared_context_state_->transfer_cache();
  }

  const gles2::FeatureInfo* feature_info() const {
    return shared_context_state_->feature_info();
  }

  const gles2::FeatureInfo::FeatureFlags& features() const {
    return feature_info()->feature_flags();
  }

  const GpuDriverBugWorkarounds& workarounds() const {
    return feature_info()->workarounds();
  }

  void FlushToWorkAroundMacCrashes() {
#if defined(OS_MAC)
    // Only relevant when Ganesh is running on top of GL.
    if (!shared_context_state_->GrContextIsGL())
      return;
    // This function does aggressive flushes to work around crashes in the
    // macOS OpenGL driver.
    // https://crbug.com/906453
    if (!flush_workaround_disabled_for_test_) {
      TRACE_EVENT0("gpu", "RasterDecoderImpl::FlushToWorkAroundMacCrashes");
      if (gr_context())
        gr_context()->flushAndSubmit();
      api()->glFlushFn();

      // Flushes can be expensive, yield to allow interruption after each flush.
      ExitCommandProcessingEarly();
    }
#endif
  }

  const gl::GLVersionInfo& gl_version_info() {
    return feature_info()->gl_version_info();
  }

  // Set remaining commands to process to 0 to force DoCommands to return
  // and allow context preemption and GPU watchdog checks in
  // CommandExecutor().
  void ExitCommandProcessingEarly() override;

  template <bool DebugImpl>
  error::Error DoCommandsImpl(unsigned int num_commands,
                              const volatile void* buffer,
                              int num_entries,
                              int* entries_processed);

  bool GenQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
  void DeleteQueriesEXTHelper(GLsizei n, const volatile GLuint* client_ids);
  void DoFinish();
  void DoFlush();
  void DoGetIntegerv(GLenum pname, GLint* params, GLsizei params_size);
  void DoTraceEndCHROMIUM();
  bool InitializeCopyTexImageBlitter();
  bool InitializeCopyTextureCHROMIUM();
  // Copy-sub-texture entry point; dispatches to one of the GL/passthrough/
  // Skia variants below.
  void DoCopySubTextureINTERNAL(GLint xoffset,
                                GLint yoffset,
                                GLint x,
                                GLint y,
                                GLsizei width,
                                GLsizei height,
                                GLboolean unpack_flip_y,
                                const volatile GLbyte* mailboxes);
  void DoCopySubTextureINTERNALGLPassthrough(GLint xoffset,
                                             GLint yoffset,
                                             GLint x,
                                             GLint y,
                                             GLsizei width,
                                             GLsizei height,
                                             GLboolean unpack_flip_y,
                                             const Mailbox& source_mailbox,
                                             const Mailbox& dest_mailbox);
  void DoCopySubTextureINTERNALGL(GLint xoffset,
                                  GLint yoffset,
                                  GLint x,
                                  GLint y,
                                  GLsizei width,
                                  GLsizei height,
                                  GLboolean unpack_flip_y,
                                  const Mailbox& source_mailbox,
                                  const Mailbox& dest_mailbox);
  void DoCopySubTextureINTERNALSkia(GLint xoffset,
                                    GLint yoffset,
                                    GLint x,
                                    GLint y,
                                    GLsizei width,
                                    GLsizei height,
                                    GLboolean unpack_flip_y,
                                    const Mailbox& source_mailbox,
                                    const Mailbox& dest_mailbox);
  void DoWritePixelsINTERNAL(GLint x_offset,
                             GLint y_offset,
                             GLuint src_width,
                             GLuint src_height,
                             GLuint row_bytes,
                             GLuint src_sk_color_type,
                             GLuint src_sk_alpha_type,
                             GLint shm_id,
                             GLuint shm_offset,
                             GLuint shm_size,
                             const volatile GLbyte* mailbox);
  void DoReadbackImagePixelsINTERNAL(GLint src_x,
                                     GLint src_y,
                                     GLuint dst_width,
                                     GLuint dst_height,
                                     GLuint row_bytes,
                                     GLuint dst_sk_color_type,
                                     GLuint dst_sk_alpha_type,
                                     GLint shm_id,
                                     GLuint shm_offset,
                                     GLuint pixels_offset,
                                     const volatile GLbyte* mailbox);
  void DoConvertYUVMailboxesToRGBINTERNAL(GLenum yuv_color_space,
                                          GLboolean is_nv12,
                                          const volatile GLbyte* mailboxes);

  void DoLoseContextCHROMIUM(GLenum current, GLenum other) { NOTIMPLEMENTED(); }
  void DoBeginRasterCHROMIUM(GLuint sk_color,
                             GLuint msaa_sample_count,
                             GLboolean can_use_lcd_text,
                             const volatile GLbyte* key);
  void DoRasterCHROMIUM(GLuint raster_shm_id,
                        GLuint raster_shm_offset,
                        GLuint raster_shm_size,
                        GLuint font_shm_id,
                        GLuint font_shm_offset,
                        GLuint font_shm_size);
  void DoEndRasterCHROMIUM();
  void DoCreateTransferCacheEntryINTERNAL(GLuint entry_type,
                                          GLuint entry_id,
                                          GLuint handle_shm_id,
                                          GLuint handle_shm_offset,
                                          GLuint data_shm_id,
                                          GLuint data_shm_offset,
                                          GLuint data_size);
  void DoUnlockTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id);
  void DoDeleteTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id);
  void RestoreStateForAttrib(GLuint attrib, bool restore_array_binding);
  void DeletePaintCacheTextBlobsINTERNALHelper(
      GLsizei n,
      const volatile GLuint* paint_cache_ids);
  void DeletePaintCachePathsINTERNALHelper(
      GLsizei n,
      const volatile GLuint* paint_cache_ids);
  void DoClearPaintCacheINTERNAL();

  // Generates a DDL, if necessary, and compiles shaders requires to raster it.
  // Returns false each time a shader needed to be compiled and the decoder
  // should yield. Returns true once all shaders in the DDL have been compiled.
  bool EnsureDDLReadyForRaster();

  // Flushes |surface|, asking Skia to signal |signal_semaphores| if any were
  // provided, then submits the recorded work to the GPU.
  void FlushAndSubmitIfNecessary(
      SkSurface* surface,
      std::vector<GrBackendSemaphore> signal_semaphores) {
    bool sync_cpu = gpu::ShouldVulkanSyncCpuForSkiaSubmit(
        shared_context_state_->vk_context_provider());
    if (signal_semaphores.empty()) {
      surface->flush();
      if (sync_cpu)
        gr_context()->submit(sync_cpu);
      return;
    }

    // Always flush the surface even if source_scoped_access.success() is
    // false, so the begin_semaphores can be released, and end_semaphores can
    // be signalled.
    GrFlushInfo flush_info = {
        .fNumSemaphores = signal_semaphores.size(),
        .fSignalSemaphores = signal_semaphores.data(),
    };
    gpu::AddVulkanCleanupTaskForSkiaFlush(
        shared_context_state_->vk_context_provider(), &flush_info);
    auto result = surface->flush(flush_info);
    // Only when |signal_semaphores| is empty could the queue submission be
    // deferred; here we must submit so the semaphores get signalled.
    DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
    gr_context()->submit(sync_cpu);
  }

#if defined(NDEBUG)
  void LogClientServiceMapping(const char* /* function_name */,
                               GLuint /* client_id */,
                               GLuint /* service_id */) {}
  template <typename T>
  void LogClientServiceForInfo(T* /* info */,
                               GLuint /* client_id */,
                               const char* /* function_name */) {}
#else
  void LogClientServiceMapping(const char* function_name,
                               GLuint client_id,
                               GLuint service_id) {
    if (gpu_preferences_.enable_gpu_service_logging_gpu) {
      VLOG(1) << "[" << logger_.GetLogPrefix() << "] " << function_name
              << ": client_id = " << client_id
              << ", service_id = " << service_id;
    }
  }
  template <typename T>
  void LogClientServiceForInfo(T* info,
                               GLuint client_id,
                               const char* function_name) {
    if (info) {
      LogClientServiceMapping(function_name, client_id, info->service_id());
    }
  }
#endif

// Generate a member function prototype for each command in an automated and
// typesafe way.
#define RASTER_CMD_OP(name) \
  Error Handle##name(uint32_t immediate_data_size, const volatile void* data);

  RASTER_COMMAND_LIST(RASTER_CMD_OP)
#undef RASTER_CMD_OP

  typedef error::Error (RasterDecoderImpl::*CmdHandler)(
      uint32_t immediate_data_size,
      const volatile void* data);

  // A struct to hold info about each command.
  struct CommandInfo {
    CmdHandler cmd_handler;
    uint8_t arg_flags;   // How to handle the arguments for this command
    uint8_t cmd_flags;   // How to handle this command
    uint16_t arg_count;  // How many arguments are expected for this command.
  };

  // A table of CommandInfo for all the commands.
  static const CommandInfo command_info[kNumCommands - kFirstRasterCommand];

  const int raster_decoder_id_;

  // Number of commands remaining to be processed in DoCommands().
  int commands_to_process_ = 0;

  bool supports_gpu_raster_ = false;
  bool supports_oop_raster_ = false;
  bool use_passthrough_ = false;
  bool use_ddl_ = false;

  // The current decoder error communicates the decoder error through command
  // processing functions that do not return the error value. Should be set
  // only if not returning an error.
  error::Error current_decoder_error_ = error::kNoError;

  GpuPreferences gpu_preferences_;

  gles2::DebugMarkerManager debug_marker_manager_;
  gles2::Logger logger_;
  std::unique_ptr<gles2::ErrorState> error_state_;
  bool context_lost_ = false;

  scoped_refptr<SharedContextState> shared_context_state_;
  std::unique_ptr<Validators> validators_;

  SharedImageRepresentationFactory shared_image_representation_factory_;
  std::unique_ptr<QueryManager> query_manager_;

  gles2::GLES2Util util_;

  // An optional behaviour to lose the context when OOM.
  bool lose_context_when_out_of_memory_ = false;

  std::unique_ptr<gles2::CopyTexImageResourceManager> copy_tex_image_blit_;
  std::unique_ptr<gles2::CopyTextureCHROMIUMResourceManager>
      copy_texture_chromium_;

  std::unique_ptr<gles2::GPUTracer> gpu_tracer_;
  const unsigned char* gpu_decoder_category_;
  static constexpr int gpu_trace_level_ = 2;
  bool gpu_trace_commands_ = false;
  bool gpu_debug_commands_ = false;

  // Raster helpers.
  scoped_refptr<ServiceFontManager> font_manager_;
  std::unique_ptr<SharedImageRepresentationSkia> shared_image_;
  std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
      scoped_shared_image_write_;
  // Not owned; presumably points into |scoped_shared_image_write_| or
  // |sk_surface_for_testing_| while a raster pass is active — verify against
  // DoBeginRasterCHROMIUM before relying on its lifetime.
  SkSurface* sk_surface_ = nullptr;
  std::unique_ptr<SharedImageProviderImpl> paint_op_shared_image_provider_;

  sk_sp<SkSurface> sk_surface_for_testing_;
  std::vector<GrBackendSemaphore> end_semaphores_;
  std::unique_ptr<cc::ServicePaintCache> paint_cache_;

  std::unique_ptr<SkDeferredDisplayListRecorder> recorder_;
  sk_sp<SkDeferredDisplayList> ddl_;
  base::Optional<SkDeferredDisplayList::ProgramIterator> program_iterator_;
  SkCanvas* raster_canvas_ = nullptr;  // ptr into recorder_ or sk_surface_
  std::vector<SkDiscardableHandleId> locked_handles_;

  // Tracing helpers.
  int raster_chromium_id_ = 0;

  // Workaround for https://crbug.com/906453
  bool flush_workaround_disabled_for_test_ = false;

  bool in_copy_sub_texture_ = false;
  bool reset_texture_state_ = false;

  bool is_privileged_ = false;

  gl::GLApi* api_ = nullptr;

  base::WeakPtrFactory<DecoderContext> weak_ptr_factory_{this};

  DISALLOW_COPY_AND_ASSIGN(RasterDecoderImpl);
};
793
// Static dispatch table mapping each raster command to its handler and
// argument metadata; indexed by (command id - kFirstRasterCommand), matching
// the in-class declaration of |command_info|.
constexpr RasterDecoderImpl::CommandInfo RasterDecoderImpl::command_info[] = {
#define RASTER_CMD_OP(name)                                \
  {                                                        \
      &RasterDecoderImpl::Handle##name,                    \
      cmds::name::kArgFlags,                               \
      cmds::name::cmd_flags,                               \
      sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
  }, /* NOLINT */
    RASTER_COMMAND_LIST(RASTER_CMD_OP)
#undef RASTER_CMD_OP
};
805
// static
RasterDecoder* RasterDecoder::Create(
    DecoderClient* client,
    CommandBufferServiceBase* command_buffer_service,
    gles2::Outputter* outputter,
    const GpuFeatureInfo& gpu_feature_info,
    const GpuPreferences& gpu_preferences,
    MemoryTracker* memory_tracker,
    SharedImageManager* shared_image_manager,
    scoped_refptr<SharedContextState> shared_context_state,
    bool is_privileged) {
  // The returned decoder is heap-allocated; the caller takes ownership.
  return new RasterDecoderImpl(client, command_buffer_service, outputter,
                               gpu_feature_info, gpu_preferences,
                               memory_tracker, shared_image_manager,
                               std::move(shared_context_state), is_privileged);
}
822
// Base-class constructor: wires the common decoder plumbing and stores the
// (non-owned) outputter used for tracing.
RasterDecoder::RasterDecoder(DecoderClient* client,
                             CommandBufferServiceBase* command_buffer_service,
                             gles2::Outputter* outputter)
    : CommonDecoder(client, command_buffer_service), outputter_(outputter) {}
827
~RasterDecoder()828 RasterDecoder::~RasterDecoder() {}
829
// True once set_initialized() has been called (see
// RasterDecoderImpl::Initialize).
bool RasterDecoder::initialized() const {
  return initialized_;
}
833
// Default implementation: the base decoder exposes no textures.
TextureBase* RasterDecoder::GetTextureBase(uint32_t client_id) {
  return nullptr;
}
837
// Intentionally a no-op in the base class; RasterDecoderImpl overrides it.
void RasterDecoder::SetLevelInfo(uint32_t client_id,
                                 int level,
                                 unsigned internal_format,
                                 unsigned width,
                                 unsigned height,
                                 unsigned depth,
                                 unsigned format,
                                 unsigned type,
                                 const gfx::Rect& cleared_rect) {}
847
// Intentionally a no-op in the base class.
void RasterDecoder::BeginDecoding() {}
849
// Intentionally a no-op in the base class.
void RasterDecoder::EndDecoding() {}
851
// Enables/disables per-command logging (see log_commands() use in
// DoCommandsImpl).
void RasterDecoder::SetLogCommands(bool log_commands) {
  log_commands_ = log_commands;
}
855
// Accessor for the outputter passed at construction; may be used for tracing.
gles2::Outputter* RasterDecoder::outputter() const {
  return outputter_;
}
859
// Delegates to the implementation's logger for a per-decoder log prefix.
base::StringPiece RasterDecoder::GetLogPrefix() {
  return GetLogger()->GetLogPrefix();
}
863
// Constructs a raster decoder bound to |shared_context_state|.
// Notes on the initializer list:
//  - raster_decoder_id_ is GetNext() + 1, so valid ids start at 1 and 0 is
//    never handed out.
//  - use_passthrough_ requires both platform support and the preference.
//  - |client| is captured with base::Unretained in the console-message
//    callback, so it must outlive this decoder.
// Registers as a context-lost observer; the destructor unregisters.
RasterDecoderImpl::RasterDecoderImpl(
    DecoderClient* client,
    CommandBufferServiceBase* command_buffer_service,
    gles2::Outputter* outputter,
    const GpuFeatureInfo& gpu_feature_info,
    const GpuPreferences& gpu_preferences,
    MemoryTracker* memory_tracker,
    SharedImageManager* shared_image_manager,
    scoped_refptr<SharedContextState> shared_context_state,
    bool is_privileged)
    : RasterDecoder(client, command_buffer_service, outputter),
      raster_decoder_id_(g_raster_decoder_id.GetNext() + 1),
      supports_gpu_raster_(
          gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
          kGpuFeatureStatusEnabled),
      use_passthrough_(gles2::PassthroughCommandDecoderSupported() &&
                       gpu_preferences.use_passthrough_cmd_decoder),
      gpu_preferences_(gpu_preferences),
      logger_(&debug_marker_manager_,
              base::BindRepeating(&DecoderClient::OnConsoleMessage,
                                  base::Unretained(client),
                                  0),
              gpu_preferences_.disable_gl_error_limit),
      error_state_(gles2::ErrorState::Create(this, &logger_)),
      shared_context_state_(std::move(shared_context_state)),
      validators_(new Validators),
      shared_image_representation_factory_(shared_image_manager,
                                           memory_tracker),
      gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
          TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))),
      font_manager_(base::MakeRefCounted<ServiceFontManager>(
          this,
          gpu_preferences_.disable_oopr_debug_crash_dump)),
      is_privileged_(is_privileged) {
  DCHECK(shared_context_state_);
  shared_context_state_->AddContextLostObserver(this);
}
901
// Unregisters the context-lost observer added in the constructor. Resource
// teardown happens in Destroy(), not here.
RasterDecoderImpl::~RasterDecoderImpl() {
  shared_context_state_->RemoveContextLostObserver(this);
}
905
// Weak pointer for callers that may outlive this decoder.
base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() {
  return weak_ptr_factory_.GetWeakPtr();
}
909
// One-time setup. Must be called with the shared context current; |surface|
// and |context| are expected to match the shared context state's own. The
// raster decoder only supports offscreen use: an onscreen request is a
// fatal failure. Returns kSuccess otherwise.
ContextResult RasterDecoderImpl::Initialize(
    const scoped_refptr<gl::GLSurface>& surface,
    const scoped_refptr<gl::GLContext>& context,
    bool offscreen,
    const gles2::DisallowedFeatures& disallowed_features,
    const ContextCreationAttribs& attrib_helper) {
  TRACE_EVENT0("gpu", "RasterDecoderImpl::Initialize");
  DCHECK(shared_context_state_->IsCurrent(nullptr));

  // Cache the current GL api table; api() is used throughout decoding.
  api_ = gl::g_current_gl_context;

  // Mark initialized before any early return so Destroy() runs its cleanup.
  set_initialized();

  if (!offscreen) {
    return ContextResult::kFatalFailure;
  }

  if (gpu_preferences_.enable_gpu_debugging)
    set_debug(true);

  if (gpu_preferences_.enable_gpu_command_logging)
    SetLogCommands(true);

  DCHECK_EQ(surface.get(), shared_context_state_->surface());
  DCHECK_EQ(context.get(), shared_context_state_->context());

  // Create GPU Tracer for timing values.
  gpu_tracer_.reset(
      new gles2::GPUTracer(this, shared_context_state_->GrContextIsGL()));

  // Save the loseContextWhenOutOfMemory context creation attribute.
  lose_context_when_out_of_memory_ =
      attrib_helper.lose_context_when_out_of_memory;

  CHECK_GL_ERROR();

  query_manager_ = std::make_unique<QueryManager>();

  if (attrib_helper.enable_oop_rasterization) {
    // OOP raster requires the raster-transport extension; without it the
    // context is unusable for its intended purpose.
    if (!features().chromium_raster_transport) {
      LOG(ERROR) << "ContextResult::kFatalFailure: "
                    "chromium_raster_transport not present";
      Destroy(true);
      return ContextResult::kFatalFailure;
    }

    // OOP raster additionally needs a GrContext; the paint cache is only
    // useful when OOP raster is actually available.
    supports_oop_raster_ = !!shared_context_state_->gr_context();
    if (supports_oop_raster_)
      paint_cache_ = std::make_unique<cc::ServicePaintCache>();
    use_ddl_ = gpu_preferences_.enable_oop_rasterization_ddl;
  }

  return ContextResult::kSuccess;
}
964
// Releases all decoder-owned resources. |have_context| indicates whether GL
// calls are still safe; when false, only CPU-side teardown is performed.
// Safe to call if Initialize() never ran (early-outs on !initialized()).
void RasterDecoderImpl::Destroy(bool have_context) {
  if (!initialized())
    return;

  DCHECK(!have_context || shared_context_state_->context()->IsCurrent(nullptr));

  if (have_context) {
    // Drop this decoder's transfer-cache entries while we can still touch
    // GPU objects.
    if (supports_oop_raster_) {
      transfer_cache()->DeleteAllEntriesForDecoder(raster_decoder_id_);
    }

    if (copy_tex_image_blit_) {
      copy_tex_image_blit_->Destroy();
      copy_tex_image_blit_.reset();
    }

    if (copy_texture_chromium_) {
      copy_texture_chromium_->Destroy();
      copy_texture_chromium_.reset();
    }

    // Make sure we flush any pending skia work on this context.
    if (sk_surface_) {
      // Signal any semaphores accumulated during rastering; on Vulkan also
      // schedule the provider's post-flush cleanup.
      GrFlushInfo flush_info = {
          .fNumSemaphores = end_semaphores_.size(),
          .fSignalSemaphores = end_semaphores_.data(),
      };
      AddVulkanCleanupTaskForSkiaFlush(
          shared_context_state_->vk_context_provider(), &flush_info);
      auto result = sk_surface_->flush(flush_info);
      // If semaphores were pending, the flush must have submitted them.
      DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
      end_semaphores_.clear();
      sk_surface_ = nullptr;
    }

    if (gr_context())
      gr_context()->flushAndSubmit();

    scoped_shared_image_write_.reset();
    shared_image_.reset();
    sk_surface_for_testing_.reset();
    paint_op_shared_image_provider_.reset();
  }

  // Without a context we can only drop our references; the GL objects are
  // already gone with the context.
  copy_tex_image_blit_.reset();
  copy_texture_chromium_.reset();

  if (query_manager_) {
    query_manager_->Destroy(have_context);
    query_manager_.reset();
  }

  font_manager_->Destroy();
  font_manager_.reset();
}
1020
1021 // Make this decoder's GL context current.
// Make this decoder's GL context current. Trivially succeeds for non-GL
// (Vulkan/Dawn) GrContexts, which have no GL context to bind. Returns false
// if the context is (or becomes) lost.
bool RasterDecoderImpl::MakeCurrent() {
  if (!shared_context_state_->GrContextIsGL())
    return true;

  if (context_lost_) {
    LOG(ERROR) << "  RasterDecoderImpl: Trying to make lost context current.";
    return false;
  }

  if (shared_context_state_->context_lost() ||
      !shared_context_state_->MakeCurrent(nullptr)) {
    LOG(ERROR) << "  RasterDecoderImpl: Context lost during MakeCurrent.";
    return false;
  }

  DCHECK_EQ(api(), gl::g_current_gl_context);

  // Rebind textures if the service ids may have changed.
  RestoreAllExternalTextureBindingsIfNeeded();

  return true;
}
1044
GetGLContext()1045 gl::GLContext* RasterDecoderImpl::GetGLContext() {
1046 return shared_context_state_->GrContextIsGL()
1047 ? shared_context_state_->context()
1048 : nullptr;
1049 }
1050
GetGLSurface()1051 gl::GLSurface* RasterDecoderImpl::GetGLSurface() {
1052 return shared_context_state_->GrContextIsGL()
1053 ? shared_context_state_->surface()
1054 : nullptr;
1055 }
1056
// Reports the capabilities the client may rely on. Texture limits come from
// whichever backend the GrContext runs on (GL query, Vulkan device limits,
// or a fixed Dawn placeholder), then get clamped by driver workarounds.
Capabilities RasterDecoderImpl::GetCapabilities() {
  // TODO(enne): reconcile this with gles2_cmd_decoder's capability settings.
  Capabilities caps;
  caps.gpu_rasterization = supports_gpu_raster_;
  caps.supports_oop_raster = supports_oop_raster_;
  caps.gpu_memory_buffer_formats =
      feature_info()->feature_flags().gpu_memory_buffer_formats;
  caps.texture_target_exception_list =
      gpu_preferences_.texture_target_exception_list;
  caps.texture_format_bgra8888 =
      feature_info()->feature_flags().ext_texture_format_bgra8888;
  caps.texture_storage_image =
      feature_info()->feature_flags().chromium_texture_storage_image;
  caps.texture_storage = feature_info()->feature_flags().ext_texture_storage;
  // TODO(piman): have a consistent limit in shared image backings.
  // https://crbug.com/960588
  if (shared_context_state_->GrContextIsGL()) {
    api()->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &caps.max_texture_size);
  } else if (shared_context_state_->GrContextIsVulkan()) {
#if BUILDFLAG(ENABLE_VULKAN)
    caps.max_texture_size = shared_context_state_->vk_context_provider()
                                ->GetDeviceQueue()
                                ->vk_physical_device_properties()
                                .limits.maxImageDimension2D;
#else
    // GrContextIsVulkan() cannot be true in a build without Vulkan.
    NOTREACHED();
#endif
  } else if (shared_context_state_->GrContextIsDawn()) {
    // TODO(crbug.com/1090476): Query Dawn for this value once an API exists for
    // capabilities.
    caps.max_texture_size = 8192;
  } else {
    NOTIMPLEMENTED();
  }
  // Clamp limits per driver-bug workarounds (never raises them).
  if (feature_info()->workarounds().max_texture_size) {
    caps.max_texture_size = std::min(
        caps.max_texture_size, feature_info()->workarounds().max_texture_size);
    caps.max_cube_map_texture_size =
        std::min(caps.max_cube_map_texture_size,
                 feature_info()->workarounds().max_texture_size);
  }
  if (feature_info()->workarounds().max_3d_array_texture_size) {
    caps.max_3d_texture_size =
        std::min(caps.max_3d_texture_size,
                 feature_info()->workarounds().max_3d_array_texture_size);
    caps.max_array_texture_layers =
        std::min(caps.max_array_texture_layers,
                 feature_info()->workarounds().max_3d_array_texture_size);
  }
  caps.sync_query = feature_info()->feature_flags().chromium_sync_query;
  caps.msaa_is_slow = feature_info()->workarounds().msaa_is_slow;
  caps.avoid_stencil_buffers =
      feature_info()->workarounds().avoid_stencil_buffers;

  // Prefer asking Skia when a GrContext exists; otherwise fall back to the
  // GL feature flags.
  if (gr_context()) {
    caps.context_supports_distance_field_text =
        gr_context()->supportsDistanceFieldText();
    caps.texture_norm16 =
        gr_context()->colorTypeSupportedAsImage(kA16_unorm_SkColorType);
    caps.texture_half_float_linear =
        gr_context()->colorTypeSupportedAsImage(kA16_float_SkColorType);
  } else {
    caps.texture_norm16 = feature_info()->feature_flags().ext_texture_norm16;
    caps.texture_half_float_linear =
        feature_info()->feature_flags().enable_texture_half_float_linear;
  }
  return caps;
}
1125
// The raster decoder does not expose gles2-style context state; callers
// should never ask for it.
const gles2::ContextState* RasterDecoderImpl::GetContextState() {
  NOTREACHED();
  return nullptr;
}
1130
void RasterDecoderImpl::RestoreGlobalState() const {
  // We mark the context state is dirty instead of restoring global
  // state, and the global state will be restored by the next context.
  shared_context_state_->set_need_context_state_reset(true);
  shared_context_state_->PessimisticallyResetGrContext();
}
1137
// No vertex attributes are tracked by this decoder; nothing to clear.
void RasterDecoderImpl::ClearAllAttributes() const {}
1139
// This decoder does not track individual GL state; conservatively mark all
// GrContext-cached state dirty instead of restoring anything.
void RasterDecoderImpl::RestoreAllAttributes() const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1143
// |prev_state| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreState(const gles2::ContextState* prev_state) {
  shared_context_state_->PessimisticallyResetGrContext();
}
1147
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreActiveTexture() const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1151
// |prev_state| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreAllTextureUnitAndSamplerBindings(
    const gles2::ContextState* prev_state) const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1156
// |target| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreActiveTextureUnitBinding(
    unsigned int target) const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1161
// |target| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreBufferBinding(unsigned int target) {
  shared_context_state_->PessimisticallyResetGrContext();
}
1165
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreBufferBindings() const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1169
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreFramebufferBindings() const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1173
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreRenderbufferBindings() {
  shared_context_state_->PessimisticallyResetGrContext();
}
1177
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreProgramBindings() const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1181
// Only expected during a CopySubTexture operation. Instead of restoring
// immediately, record that texture state needs a reset; the flag is
// presumably consumed by the copy path (not visible in this chunk).
void RasterDecoderImpl::RestoreTextureState(unsigned service_id) {
  DCHECK(in_copy_sub_texture_);
  reset_texture_state_ = true;
}
1186
// |unit| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreTextureUnitBindings(unsigned unit) const {
  shared_context_state_->PessimisticallyResetGrContext();
}
1190
// |index| is ignored; GrContext state is pessimistically invalidated.
void RasterDecoderImpl::RestoreVertexAttribArray(unsigned index) {
  shared_context_state_->PessimisticallyResetGrContext();
}
1194
// See RestoreAllAttributes: state restoration is a pessimistic Gr reset.
void RasterDecoderImpl::RestoreAllExternalTextureBindingsIfNeeded() {
  shared_context_state_->PessimisticallyResetGrContext();
}
1198
// May return null after Destroy() has run.
QueryManager* RasterDecoderImpl::GetQueryManager() {
  return query_manager_.get();
}
1202
// Attaches |callback| to the query identified by |query_client_id|. If no
// such query exists, the callback is run immediately rather than dropped so
// the caller is never left waiting.
void RasterDecoderImpl::SetQueryCallback(unsigned int query_client_id,
                                         base::OnceClosure callback) {
  QueryManager::Query* query = query_manager_->GetQuery(query_client_id);
  if (query) {
    query->AddCallback(std::move(callback));
  } else {
    VLOG(1) << "RasterDecoderImpl::SetQueryCallback: No query with ID "
            << query_client_id << ". Running the callback immediately.";
    std::move(callback).Run();
  }
}
1214
// GPU fences are not supported by the raster decoder.
gles2::GpuFenceManager* RasterDecoderImpl::GetGpuFenceManager() {
  NOTIMPLEMENTED();
  return nullptr;
}
1219
HasPendingQueries() const1220 bool RasterDecoderImpl::HasPendingQueries() const {
1221 return query_manager_ && query_manager_->HavePendingQueries();
1222 }
1223
ProcessPendingQueries(bool did_finish)1224 void RasterDecoderImpl::ProcessPendingQueries(bool did_finish) {
1225 if (query_manager_)
1226 query_manager_->ProcessPendingQueries(did_finish);
1227 }
1228
// Idle work here is limited to pending GPU trace processing.
bool RasterDecoderImpl::HasMoreIdleWork() const {
  return gpu_tracer_->HasTracesToProcess();
}
1232
// Drains any completed GPU traces.
void RasterDecoderImpl::PerformIdleWork() {
  gpu_tracer_->ProcessTraces();
}
1236
// The raster decoder never schedules polling work.
bool RasterDecoderImpl::HasPollingWork() const {
  return false;
}
1240
// No-op: HasPollingWork() always returns false.
void RasterDecoderImpl::PerformPollingWork() {}
1242
// Client-id texture lookup is not supported by the raster decoder.
TextureBase* RasterDecoderImpl::GetTextureBase(uint32_t client_id) {
  NOTIMPLEMENTED();
  return nullptr;
}
1247
// Texture level bookkeeping is not supported by the raster decoder.
void RasterDecoderImpl::SetLevelInfo(uint32_t client_id,
                                     int level,
                                     unsigned internal_format,
                                     unsigned width,
                                     unsigned height,
                                     unsigned depth,
                                     unsigned format,
                                     unsigned type,
                                     const gfx::Rect& cleared_rect) {
  NOTIMPLEMENTED();
}
1259
// Context-loss state is owned by the shared context state.
bool RasterDecoderImpl::WasContextLost() const {
  return shared_context_state_->context_lost();
}
1263
// Delegates to the shared context state's device-reset detection.
bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const {
  return shared_context_state_->device_needs_reset();
}
1267
// Forwards the loss reason to the shared context state, which also notifies
// observers (including this decoder via OnContextLost()).
void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) {
  shared_context_state_->MarkContextLost(reason);
}
1271
// ContextLostObserver callback: propagate the loss reason to the command
// buffer and abort further command processing with kLostContext.
void RasterDecoderImpl::OnContextLost() {
  DCHECK(shared_context_state_->context_lost());
  command_buffer_service()->SetContextLostReason(
      *shared_context_state_->context_lost_reason());
  current_decoder_error_ = error::kLostContext;
}
1278
// Queries the driver-reset status; only valid while the context is not
// already marked lost.
bool RasterDecoderImpl::CheckResetStatus() {
  DCHECK(!WasContextLost());
  return shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
}
1283
// Accessor for this decoder's logger (used for log prefixes and limits).
gles2::Logger* RasterDecoderImpl::GetLogger() {
  return &logger_;
}
1287
SetIgnoreCachedStateForTest(bool ignore)1288 void RasterDecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
1289 if (use_passthrough_)
1290 return;
1291 state()->SetIgnoreCachedStateForTest(ignore);
1292 }
1293
// The raster decoder has no image manager; tests must not request it.
gles2::ImageManager* RasterDecoderImpl::GetImageManagerForTest() {
  NOTREACHED();
  return nullptr;
}
1298
// Test hook: takes ownership of |copy_texture_resource_manager|, replacing
// any existing one.
void RasterDecoderImpl::SetCopyTextureResourceManagerForTest(
    gles2::CopyTextureCHROMIUMResourceManager* copy_texture_resource_manager) {
  copy_texture_chromium_.reset(copy_texture_resource_manager);
}
1303
// Called before a batch of commands. Latches whether per-command tracing /
// debug checks are active for this batch (used to pick the DoCommandsImpl
// instantiation) and opens a query-processing scope.
void RasterDecoderImpl::BeginDecoding() {
  gpu_tracer_->BeginDecoding();
  gpu_trace_commands_ = gpu_tracer_->IsTracing() && *gpu_decoder_category_;
  gpu_debug_commands_ = log_commands() || debug() || gpu_trace_commands_;
  query_manager_->BeginProcessingCommands();
}
1310
// Closes the scopes opened by BeginDecoding().
void RasterDecoderImpl::EndDecoding() {
  gpu_tracer_->EndDecoding();
  query_manager_->EndProcessingCommands();
}
1315
// Returns a human-readable name for either a raster-specific command id or,
// outside that range, a common command id.
const char* RasterDecoderImpl::GetCommandName(unsigned int command_id) const {
  if (command_id >= kFirstRasterCommand && command_id < kNumCommands) {
    return raster::GetCommandName(static_cast<CommandId>(command_id));
  }
  return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
}
1322
// Core command-processing loop. |DebugImpl| compiles the logging/tracing/
// GL-error-checking paths in or out so the common case pays nothing for
// them. Processes up to |num_commands| commands from |buffer| (containing
// |num_entries| CommandBufferEntry slots); the number of entries consumed
// is returned via |entries_processed|.
template <bool DebugImpl>
error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
                                               const volatile void* buffer,
                                               int num_entries,
                                               int* entries_processed) {
  DCHECK(entries_processed);
  // commands_to_process_ is a member so ExitCommandProcessingEarly() can
  // zero it and stop this loop from another call frame.
  commands_to_process_ = num_commands;
  error::Error result = error::kNoError;
  const volatile CommandBufferEntry* cmd_data =
      static_cast<const volatile CommandBufferEntry*>(buffer);
  int process_pos = 0;
  CommandId command = static_cast<CommandId>(0);

  while (process_pos < num_entries && result == error::kNoError &&
         commands_to_process_--) {
    const unsigned int size = cmd_data->value_header.size;
    command = static_cast<CommandId>(cmd_data->value_header.command);

    // A zero-sized command can never advance; reject it outright.
    if (size == 0) {
      result = error::kInvalidSize;
      break;
    }

    // The command must fit entirely inside the remaining buffer.
    if (static_cast<int>(size) + process_pos > num_entries) {
      result = error::kOutOfBounds;
      break;
    }

    if (DebugImpl && log_commands()) {
      LOG(ERROR) << "[" << logger_.GetLogPrefix() << "]"
                 << "cmd: " << GetCommandName(command);
    }

    const unsigned int arg_count = size - 1;
    unsigned int command_index = command - kFirstRasterCommand;
    if (command_index < base::size(command_info)) {
      const CommandInfo& info = command_info[command_index];
      // While a BeginRasterCHROMIUM is open (sk_surface_ set), only a
      // whitelisted subset of commands is legal; others raise a GL error
      // and are skipped, not fatal.
      if (sk_surface_) {
        if (!AllowedBetweenBeginEndRaster(command)) {
          LOCAL_SET_GL_ERROR(
              GL_INVALID_OPERATION, GetCommandName(command),
              "Unexpected command between BeginRasterCHROMIUM and "
              "EndRasterCHROMIUM");
          process_pos += size;
          cmd_data += size;
          continue;
        }
      }
      // Validate the argument count against the table: exact for kFixed,
      // lower bound for kAtLeastN (extra entries are immediate data).
      unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
      if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
          (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
        bool doing_gpu_trace = false;
        if (DebugImpl && gpu_trace_commands_) {
          if (CMD_FLAG_GET_TRACE_LEVEL(info.cmd_flags) <= gpu_trace_level_) {
            doing_gpu_trace = true;
            gpu_tracer_->Begin(TRACE_DISABLED_BY_DEFAULT("gpu.decoder"),
                               GetCommandName(command), gles2::kTraceDecoder);
          }
        }

        uint32_t immediate_data_size = (arg_count - info_arg_count) *
                                       sizeof(CommandBufferEntry);  // NOLINT
        result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);

        if (DebugImpl && doing_gpu_trace)
          gpu_tracer_->End(gles2::kTraceDecoder);

        // In debug mode, drain and report any GL errors the handler left
        // behind (only meaningful on a live GL-backed context).
        if (DebugImpl && shared_context_state_->GrContextIsGL() && debug() &&
            !WasContextLost()) {
          GLenum error;
          while ((error = api()->glGetErrorFn()) != GL_NO_ERROR) {
            LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
                       << "GL ERROR: " << gles2::GLES2Util::GetStringEnum(error)
                       << " : " << GetCommandName(command);
            LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
          }
        }
      } else {
        result = error::kInvalidArguments;
      }
    } else {
      // Not a raster command: fall back to the shared command set.
      result = DoCommonCommand(command, arg_count, cmd_data);
    }

    // A decoder error raised asynchronously (e.g. context loss) overrides a
    // successful handler result; it is consumed here.
    if (result == error::kNoError &&
        current_decoder_error_ != error::kNoError) {
      result = current_decoder_error_;
      current_decoder_error_ = error::kNoError;
    }

    // kDeferCommandUntilLater leaves the cursor in place so the command is
    // retried on the next DoCommands call.
    if (result != error::kDeferCommandUntilLater) {
      process_pos += size;
      cmd_data += size;
    }

    // Workaround for https://crbug.com/906453: Flush after every command that
    // is not between a BeginRaster and EndRaster.
    if (!sk_surface_)
      FlushToWorkAroundMacCrashes();
  }

  *entries_processed = process_pos;

  if (error::IsError(result)) {
    LOG(ERROR) << "Error: " << result << " for Command "
               << GetCommandName(command);
  }

  if (supports_oop_raster_)
    client()->ScheduleGrContextCleanup();

  return result;
}
1436
DoCommands(unsigned int num_commands,const volatile void * buffer,int num_entries,int * entries_processed)1437 error::Error RasterDecoderImpl::DoCommands(unsigned int num_commands,
1438 const volatile void* buffer,
1439 int num_entries,
1440 int* entries_processed) {
1441 if (gpu_debug_commands_) {
1442 return DoCommandsImpl<true>(num_commands, buffer, num_entries,
1443 entries_processed);
1444 } else {
1445 return DoCommandsImpl<false>(num_commands, buffer, num_entries,
1446 entries_processed);
1447 }
1448 }
1449
// Zeroing the counter makes the DoCommandsImpl loop condition fail on its
// next iteration, stopping command processing early.
void RasterDecoderImpl::ExitCommandProcessingEarly() {
  commands_to_process_ = 0;
}
1453
// Per-decoder prefix used in log and error messages.
base::StringPiece RasterDecoderImpl::GetLogPrefix() {
  return logger_.GetLogPrefix();
}
1457
// Image binding is not supported by the raster decoder.
void RasterDecoderImpl::BindImage(uint32_t client_texture_id,
                                  uint32_t texture_target,
                                  gl::GLImage* image,
                                  bool can_bind_to_sampler) {
  NOTIMPLEMENTED();
}
1464
// The raster decoder does not belong to a gles2 context group.
gles2::ContextGroup* RasterDecoderImpl::GetContextGroup() {
  return nullptr;
}
1468
// Error state object backing the LOCAL_SET_GL_ERROR macros.
gles2::ErrorState* RasterDecoderImpl::GetErrorState() {
  return error_state_.get();
}
1472
// Abstract textures are not supported by the raster decoder.
std::unique_ptr<gles2::AbstractTexture>
RasterDecoderImpl::CreateAbstractTexture(GLenum target,
                                         GLenum internal_format,
                                         GLsizei width,
                                         GLsizei height,
                                         GLsizei depth,
                                         GLint border,
                                         GLenum format,
                                         GLenum type) {
  return nullptr;
}
1484
// Checks |format| against the feature-info validator's compressed-format
// list (i.e. formats the current context supports).
bool RasterDecoderImpl::IsCompressedTextureFormat(unsigned format) {
  return feature_info()->validators()->compressed_texture_format.IsValid(
      format);
}
1489
// Zero-fills the sub-rectangle (xoffset, yoffset, width, height) of the
// given texture level via glTexSubImage2D. To bound memory use, the zero
// buffer is capped at 4MB and the clear is issued in horizontal tiles of
// |tile_height| rows. Returns false if the format is unsupported (depth),
// the size computation fails, or a single row exceeds the cap.
bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
                                   unsigned target,
                                   int level,
                                   unsigned format,
                                   unsigned type,
                                   int xoffset,
                                   int yoffset,
                                   int width,
                                   int height) {
  // 3D/array/external targets are not handled by this 2D clear path.
  DCHECK(target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY &&
         target != GL_TEXTURE_EXTERNAL_OES);
  uint32_t channels = gles2::GLES2Util::GetChannelsForFormat(format);
  if (channels & gles2::GLES2Util::kDepth) {
    DCHECK(false) << "depth not supported";
    return false;
  }

  // Upper bound on the zero-filled staging buffer (4MB).
  static constexpr uint32_t kMaxZeroSize = 1024 * 1024 * 4;

  uint32_t size;
  uint32_t padded_row_size;
  constexpr GLint unpack_alignment = 4;
  if (!gles2::GLES2Util::ComputeImageDataSizes(width, height, 1, format, type,
                                               unpack_alignment, &size, nullptr,
                                               &padded_row_size)) {
    return false;
  }

  TRACE_EVENT1("gpu", "RasterDecoderImpl::ClearLevel", "size", size);

  int tile_height;

  if (size > kMaxZeroSize) {
    if (kMaxZeroSize < padded_row_size) {
      // That'd be an awfully large texture.
      return false;
    }
    // We should never have a large total size with a zero row size.
    DCHECK_GT(padded_row_size, 0U);
    // Clear in tiles of as many full rows as fit in the buffer; recompute
    // |size| for a single tile.
    tile_height = kMaxZeroSize / padded_row_size;
    if (!gles2::GLES2Util::ComputeImageDataSizes(width, tile_height, 1, format,
                                                 type, unpack_alignment, &size,
                                                 nullptr, nullptr)) {
      return false;
    }
  } else {
    // Whole level fits in one upload.
    tile_height = height;
  }

  {
    ScopedTextureBinder binder(state(), texture->target(),
                               texture->service_id(), gr_context());
    base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
    if (shared_context_state_->need_context_state_reset()) {
      pixel_unpack_state.emplace(state(), gr_context(), feature_info());
    }
    // Add extra scope to destroy zero and the object it owns right
    // after its usage.
    // Assumes the size has already been checked.
    std::unique_ptr<char[]> zero(new char[size]);
    memset(zero.get(), 0, size);
    GLint y = 0;
    while (y < height) {
      // Last tile may be shorter than tile_height.
      GLint h = y + tile_height > height ? height - y : tile_height;
      api()->glTexSubImage2DFn(
          target, level, xoffset, yoffset + y, width, h,
          gles2::TextureManager::AdjustTexFormat(feature_info(), format), type,
          zero.get());
      y += tile_height;
    }
  }
  DCHECK(glGetError() == GL_NO_ERROR);
  return true;
}
1564
// Compressed textures are never cleared through the raster decoder.
bool RasterDecoderImpl::ClearCompressedTextureLevel(gles2::Texture* texture,
                                                    unsigned target,
                                                    int level,
                                                    unsigned format,
                                                    int width,
                                                    int height) {
  NOTREACHED();
  return false;
}
1574
// Compressed 3D textures are never cleared through the raster decoder.
bool RasterDecoderImpl::ClearCompressedTextureLevel3D(gles2::Texture* texture,
                                                      unsigned target,
                                                      int level,
                                                      unsigned format,
                                                      int width,
                                                      int height,
                                                      int depth) {
  NOTREACHED();
  return false;
}
1585
// Unique (per-process, starting at 1) id used to key transfer-cache entries.
int RasterDecoderImpl::GetRasterDecoderId() const {
  return raster_decoder_id_;
}
1589
// Test-only accessor mirroring GetRasterDecoderId().
int RasterDecoderImpl::DecoderIdForTest() {
  return raster_decoder_id_;
}
1593
// Test-only accessor for the shared transfer cache.
ServiceTransferCache* RasterDecoderImpl::GetTransferCacheForTest() {
  return shared_context_state_->transfer_cache();
}
1597
// Test-only: installs a small CPU-backed SkSurface so raster commands can
// run without a real GPU context.
void RasterDecoderImpl::SetUpForRasterCHROMIUMForTest() {
  // Some tests use mock GL which doesn't work with skia. Just use a bitmap
  // backed surface for OOP raster commands.
  auto info = SkImageInfo::MakeN32(10, 10, kPremul_SkAlphaType,
                                   SkColorSpace::MakeSRGB());
  SkSurfaceProps props = skia::LegacyDisplayGlobals::GetSkSurfaceProps();
  sk_surface_for_testing_ = SkSurface::MakeRaster(info, &props);
  sk_surface_ = sk_surface_for_testing_.get();
  raster_canvas_ = sk_surface_->getCanvas();
}
1608
// Test-only: raises a synthetic GL_OUT_OF_MEMORY through the error state.
void RasterDecoderImpl::SetOOMErrorForTest() {
  LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "SetOOMErrorForTest",
                     "synthetic out of memory");
}
1613
// Test-only: disables the per-command flush added for https://crbug.com/906453.
void RasterDecoderImpl::DisableFlushWorkaroundForTest() {
  flush_workaround_disabled_for_test_ = true;
}
1617
// Called when a context-lost GL error is reported. Triggers reset-status
// detection so the shared context state marks itself lost before observers
// are told.
void RasterDecoderImpl::OnContextLostError() {
  if (!WasContextLost()) {
    // Need to lose current context before broadcasting!
    shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
  }
}
1624
// Called on GL_OUT_OF_MEMORY. If the context was created with
// loseContextWhenOutOfMemory and isn't already lost (and a device reset
// isn't the real cause), mark the context lost with kOutOfMemory.
void RasterDecoderImpl::OnOutOfMemoryError() {
  if (lose_context_when_out_of_memory_ && !WasContextLost()) {
    if (!shared_context_state_->CheckResetStatus(/*needs_gl=*/false)) {
      MarkContextLost(error::kOutOfMemory);
    }
  }
}
1632
// glBeginQueryEXT handler. Validates the target (only COMMANDS_ISSUED and,
// when the sync-query feature is available, COMMANDS_COMPLETED are
// supported), checks that no query is already active on the target, resolves
// the shared-memory sync block, and creates-or-revalidates the query before
// starting it. Invalid client input raises a GL error and returns kNoError;
// bad shared memory is a decoder-level error.
error::Error RasterDecoderImpl::HandleBeginQueryEXT(
    uint32_t immediate_data_size,
    const volatile void* cmd_data) {
  const volatile raster::cmds::BeginQueryEXT& c =
      *static_cast<const volatile raster::cmds::BeginQueryEXT*>(cmd_data);
  GLenum target = static_cast<GLenum>(c.target);
  GLuint client_id = static_cast<GLuint>(c.id);
  int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id);
  uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset);

  switch (target) {
    case GL_COMMANDS_ISSUED_CHROMIUM:
      break;
    case GL_COMMANDS_COMPLETED_CHROMIUM:
      if (!features().chromium_sync_query) {
        LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
                           "not enabled for commands completed queries");
        return error::kNoError;
      }
      break;
    default:
      LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glBeginQueryEXT",
                         "unknown query target");
      return error::kNoError;
  }

  if (query_manager_->GetActiveQuery(target)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
                       "query already in progress");
    return error::kNoError;
  }

  if (client_id == 0) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
    return error::kNoError;
  }

  // The sync block lives in client-visible shared memory; reject bad
  // ids/offsets at the decoder level.
  scoped_refptr<Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id);
  if (!buffer)
    return error::kInvalidArguments;
  QuerySync* sync = static_cast<QuerySync*>(
      buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync)));
  if (!sync)
    return error::kOutOfBounds;

  QueryManager::Query* query = query_manager_->GetQuery(client_id);
  if (!query) {
    if (!query_manager_->IsValidQuery(client_id)) {
      LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
                         "id not made by glGenQueriesEXT");
      return error::kNoError;
    }

    query =
        query_manager_->CreateQuery(target, client_id, std::move(buffer), sync);
  } else {
    // Reusing an existing query: it must keep the same target and sync
    // memory as when it was created.
    if (query->target() != target) {
      LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
                         "target does not match");
      return error::kNoError;
    } else if (query->sync() != sync) {
      DLOG(ERROR) << "Shared memory used by query not the same as before";
      return error::kInvalidArguments;
    }
  }

  query_manager_->BeginQuery(query);
  return error::kNoError;
}
1702
HandleEndQueryEXT(uint32_t immediate_data_size,const volatile void * cmd_data)1703 error::Error RasterDecoderImpl::HandleEndQueryEXT(
1704 uint32_t immediate_data_size,
1705 const volatile void* cmd_data) {
1706 const volatile raster::cmds::EndQueryEXT& c =
1707 *static_cast<const volatile raster::cmds::EndQueryEXT*>(cmd_data);
1708 GLenum target = static_cast<GLenum>(c.target);
1709 uint32_t submit_count = static_cast<GLuint>(c.submit_count);
1710
1711 QueryManager::Query* query = query_manager_->GetActiveQuery(target);
1712 if (!query) {
1713 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glEndQueryEXT",
1714 "No active query");
1715 return error::kNoError;
1716 }
1717
1718 query_manager_->EndQuery(query, submit_count);
1719 return error::kNoError;
1720 }
1721
// Handles glQueryCounterEXT for the raster decoder.  Only
// GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM is accepted.  The query's result is
// written to the QuerySync slot in shared memory at
// (sync_data_shm_id, sync_data_shm_offset).
error::Error RasterDecoderImpl::HandleQueryCounterEXT(
    uint32_t immediate_data_size,
    const volatile void* cmd_data) {
  const volatile raster::cmds::QueryCounterEXT& c =
      *static_cast<const volatile raster::cmds::QueryCounterEXT*>(cmd_data);
  GLenum target = static_cast<GLenum>(c.target);
  GLuint client_id = static_cast<GLuint>(c.id);
  int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id);
  uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset);
  uint32_t submit_count = static_cast<GLuint>(c.submit_count);

  if (target != GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM) {
    LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glQueryCounterEXT",
                       "unknown query target");
    return error::kNoError;
  }

  // Resolve the shared-memory result slot; malformed references are
  // transfer-level errors rather than GL errors.
  scoped_refptr<Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id);
  if (!buffer)
    return error::kInvalidArguments;
  QuerySync* sync = static_cast<QuerySync*>(
      buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync)));
  if (!sync)
    return error::kOutOfBounds;

  QueryManager::Query* query = query_manager_->GetQuery(client_id);
  if (!query) {
    // First use of this id: it must have come from glGenQueriesEXT.
    if (!query_manager_->IsValidQuery(client_id)) {
      LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glQueryCounterEXT",
                         "id not made by glGenQueriesEXT");
      return error::kNoError;
    }
    query =
        query_manager_->CreateQuery(target, client_id, std::move(buffer), sync);
  } else {
    // Reuse requires the same target and the same result slot as before.
    if (query->target() != target) {
      LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glQueryCounterEXT",
                         "target does not match");
      return error::kNoError;
    } else if (query->sync() != sync) {
      DLOG(ERROR) << "Shared memory used by query not the same as before";
      return error::kInvalidArguments;
    }
  }
  query_manager_->QueryCounter(query, submit_count);

  return error::kNoError;
}
1770
DoFinish()1771 void RasterDecoderImpl::DoFinish() {
1772 if (shared_context_state_->GrContextIsGL())
1773 api()->glFinishFn();
1774 ProcessPendingQueries(true);
1775 }
1776
DoFlush()1777 void RasterDecoderImpl::DoFlush() {
1778 if (shared_context_state_->GrContextIsGL())
1779 api()->glFlushFn();
1780 ProcessPendingQueries(false);
1781 }
1782
GenQueriesEXTHelper(GLsizei n,const GLuint * client_ids)1783 bool RasterDecoderImpl::GenQueriesEXTHelper(GLsizei n,
1784 const GLuint* client_ids) {
1785 for (GLsizei ii = 0; ii < n; ++ii) {
1786 if (query_manager_->IsValidQuery(client_ids[ii])) {
1787 return false;
1788 }
1789 }
1790 query_manager_->GenQueries(n, client_ids);
1791 return true;
1792 }
1793
DeleteQueriesEXTHelper(GLsizei n,const volatile GLuint * client_ids)1794 void RasterDecoderImpl::DeleteQueriesEXTHelper(
1795 GLsizei n,
1796 const volatile GLuint* client_ids) {
1797 for (GLsizei ii = 0; ii < n; ++ii) {
1798 GLuint client_id = client_ids[ii];
1799 query_manager_->RemoveQuery(client_id);
1800 }
1801 }
1802
HandleTraceBeginCHROMIUM(uint32_t immediate_data_size,const volatile void * cmd_data)1803 error::Error RasterDecoderImpl::HandleTraceBeginCHROMIUM(
1804 uint32_t immediate_data_size,
1805 const volatile void* cmd_data) {
1806 const volatile gles2::cmds::TraceBeginCHROMIUM& c =
1807 *static_cast<const volatile gles2::cmds::TraceBeginCHROMIUM*>(cmd_data);
1808 Bucket* category_bucket = GetBucket(c.category_bucket_id);
1809 Bucket* name_bucket = GetBucket(c.name_bucket_id);
1810 static constexpr size_t kMaxStrLen = 256;
1811 if (!category_bucket || category_bucket->size() == 0 ||
1812 category_bucket->size() > kMaxStrLen || !name_bucket ||
1813 name_bucket->size() == 0 || name_bucket->size() > kMaxStrLen) {
1814 return error::kInvalidArguments;
1815 }
1816
1817 std::string category_name;
1818 std::string trace_name;
1819 if (!category_bucket->GetAsString(&category_name) ||
1820 !name_bucket->GetAsString(&trace_name)) {
1821 return error::kInvalidArguments;
1822 }
1823
1824 debug_marker_manager_.PushGroup(trace_name);
1825 if (!gpu_tracer_->Begin(category_name, trace_name, gles2::kTraceCHROMIUM)) {
1826 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glTraceBeginCHROMIUM",
1827 "unable to create begin trace");
1828 return error::kNoError;
1829 }
1830 return error::kNoError;
1831 }
1832
DoTraceEndCHROMIUM()1833 void RasterDecoderImpl::DoTraceEndCHROMIUM() {
1834 debug_marker_manager_.PopGroup();
1835 if (!gpu_tracer_->End(gles2::kTraceCHROMIUM)) {
1836 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glTraceEndCHROMIUM",
1837 "no trace begin found");
1838 return;
1839 }
1840 }
1841
HandleSetActiveURLCHROMIUM(uint32_t immediate_data_size,const volatile void * cmd_data)1842 error::Error RasterDecoderImpl::HandleSetActiveURLCHROMIUM(
1843 uint32_t immediate_data_size,
1844 const volatile void* cmd_data) {
1845 const volatile cmds::SetActiveURLCHROMIUM& c =
1846 *static_cast<const volatile cmds::SetActiveURLCHROMIUM*>(cmd_data);
1847 Bucket* url_bucket = GetBucket(c.url_bucket_id);
1848 static constexpr size_t kMaxStrLen = 1024;
1849 if (!url_bucket || url_bucket->size() == 0 ||
1850 url_bucket->size() > kMaxStrLen) {
1851 return error::kInvalidArguments;
1852 }
1853
1854 size_t size = url_bucket->size();
1855 const char* url_str = url_bucket->GetDataAs<const char*>(0, size);
1856 if (!url_str)
1857 return error::kInvalidArguments;
1858
1859 GURL url(base::StringPiece(url_str, size));
1860 client()->SetActiveURL(std::move(url));
1861 return error::kNoError;
1862 }
1863
InitializeCopyTexImageBlitter()1864 bool RasterDecoderImpl::InitializeCopyTexImageBlitter() {
1865 if (!copy_tex_image_blit_.get()) {
1866 LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
1867 copy_tex_image_blit_.reset(
1868 new gles2::CopyTexImageResourceManager(feature_info()));
1869 copy_tex_image_blit_->Initialize(this);
1870 if (LOCAL_PEEK_GL_ERROR("glCopySubTexture") != GL_NO_ERROR)
1871 return false;
1872 }
1873 return true;
1874 }
1875
InitializeCopyTextureCHROMIUM()1876 bool RasterDecoderImpl::InitializeCopyTextureCHROMIUM() {
1877 // Defer initializing the CopyTextureCHROMIUMResourceManager until it is
1878 // needed because it takes 10s of milliseconds to initialize.
1879 if (!copy_texture_chromium_.get()) {
1880 LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
1881 copy_texture_chromium_.reset(
1882 gles2::CopyTextureCHROMIUMResourceManager::Create());
1883 copy_texture_chromium_->Initialize(this, features());
1884 if (LOCAL_PEEK_GL_ERROR("glCopySubTexture") != GL_NO_ERROR)
1885 return false;
1886
1887 // On the desktop core profile this also needs emulation of
1888 // CopyTex{Sub}Image2D for luminance, alpha, and luminance_alpha
1889 // textures.
1890 if (gles2::CopyTexImageResourceManager::CopyTexImageRequiresBlit(
1891 feature_info(), GL_LUMINANCE)) {
1892 if (!InitializeCopyTexImageBlitter())
1893 return false;
1894 }
1895 }
1896 return true;
1897 }
1898
DoCopySubTextureINTERNAL(GLint xoffset,GLint yoffset,GLint x,GLint y,GLsizei width,GLsizei height,GLboolean unpack_flip_y,const volatile GLbyte * mailboxes)1899 void RasterDecoderImpl::DoCopySubTextureINTERNAL(
1900 GLint xoffset,
1901 GLint yoffset,
1902 GLint x,
1903 GLint y,
1904 GLsizei width,
1905 GLsizei height,
1906 GLboolean unpack_flip_y,
1907 const volatile GLbyte* mailboxes) {
1908 Mailbox source_mailbox = Mailbox::FromVolatile(
1909 reinterpret_cast<const volatile Mailbox*>(mailboxes)[0]);
1910 DLOG_IF(ERROR, !source_mailbox.Verify())
1911 << "CopySubTexture was passed an invalid mailbox";
1912 Mailbox dest_mailbox = Mailbox::FromVolatile(
1913 reinterpret_cast<const volatile Mailbox*>(mailboxes)[1]);
1914 DLOG_IF(ERROR, !dest_mailbox.Verify())
1915 << "CopySubTexture was passed an invalid mailbox";
1916
1917 if (source_mailbox == dest_mailbox) {
1918 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
1919 "source and destination mailboxes are the same");
1920 return;
1921 }
1922
1923 if (!shared_context_state_->GrContextIsGL()) {
1924 // Use Skia to copy texture if raster's gr_context() is not using GL.
1925 DoCopySubTextureINTERNALSkia(xoffset, yoffset, x, y, width, height,
1926 unpack_flip_y, source_mailbox, dest_mailbox);
1927 } else if (use_passthrough_) {
1928 DoCopySubTextureINTERNALGLPassthrough(xoffset, yoffset, x, y, width, height,
1929 unpack_flip_y, source_mailbox,
1930 dest_mailbox);
1931 } else {
1932 DoCopySubTextureINTERNALGL(xoffset, yoffset, x, y, width, height,
1933 unpack_flip_y, source_mailbox, dest_mailbox);
1934 }
1935 }
1936
namespace {

// Whether |representation| stores unpremultiplied alpha; the copy helpers
// below pass this as their unpack_premultiply_alpha flag.
GLboolean NeedsUnpackPremultiplyAlpha(
    const SharedImageRepresentation& representation) {
  return representation.alpha_type() == kUnpremul_SkAlphaType;
}

}  // namespace
1945
// Copies a width x height rect from (x, y) in the source shared image to
// (xoffset, yoffset) in the destination using glCopySubTextureCHROMIUM on
// the passthrough decoder path.  Both mailboxes must name known shared
// images and must differ.  Failures raise a GL error and abort the copy.
void RasterDecoderImpl::DoCopySubTextureINTERNALGLPassthrough(
    GLint xoffset,
    GLint yoffset,
    GLint x,
    GLint y,
    GLsizei width,
    GLsizei height,
    GLboolean unpack_flip_y,
    const Mailbox& source_mailbox,
    const Mailbox& dest_mailbox) {
  DCHECK(source_mailbox != dest_mailbox);
  DCHECK(use_passthrough_);

  // Resolve passthrough texture representations for both mailboxes.
  std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
      source_shared_image =
          shared_image_representation_factory_.ProduceGLTexturePassthrough(
              source_mailbox);
  std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
      dest_shared_image =
          shared_image_representation_factory_.ProduceGLTexturePassthrough(
              dest_mailbox);
  if (!source_shared_image || !dest_shared_image) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
    return;
  }

  // The source must already be cleared (AllowUnclearedAccess::kNo).
  std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
      source_access = source_shared_image->BeginScopedAccess(
          GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
          SharedImageRepresentation::AllowUnclearedAccess::kNo);
  if (!source_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "unable to access source for read");
    return;
  }

  // Allow uncleared access, as we manually handle clear tracking.
  std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
      dest_access = dest_shared_image->BeginScopedAccess(
          GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
          SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!dest_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "unable to access destination for write");
    return;
  }

  // The copy is only supported when the destination rect merges with the
  // already-cleared region into a single rect; otherwise an uncleared gap
  // would remain.
  gfx::Rect new_cleared_rect;
  gfx::Rect old_cleared_rect = dest_shared_image->ClearedRect();
  gfx::Rect dest_rect(xoffset, yoffset, width, height);
  if (gles2::TextureManager::CombineAdjacentRects(old_cleared_rect, dest_rect,
                                                  &new_cleared_rect)) {
    DCHECK(old_cleared_rect.IsEmpty() ||
           new_cleared_rect.Contains(old_cleared_rect));
  } else {
    // No users of RasterDecoder leverage this functionality. Clearing uncleared
    // regions could be added here if needed.
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "Cannot clear non-combineable rects.");
    return;
  }

  gles2::TexturePassthrough* source_texture =
      source_shared_image->GetTexturePassthrough().get();
  gles2::TexturePassthrough* dest_texture =
      dest_shared_image->GetTexturePassthrough().get();
  DCHECK(!source_texture->is_bind_pending());
  DCHECK_NE(source_texture->service_id(), dest_texture->service_id());

  // Perform the actual copy, premultiplying if the source stores
  // unpremultiplied alpha, then surface any real GL errors it produced.
  api()->glCopySubTextureCHROMIUMFn(
      source_texture->service_id(), /*source_level=*/0, dest_texture->target(),
      dest_texture->service_id(),
      /*dest_level=*/0, xoffset, yoffset, x, y, width, height, unpack_flip_y,
      NeedsUnpackPremultiplyAlpha(*source_shared_image),
      /*unpack_unmultiply_alpha=*/false);
  LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");

  // Record the enlarged cleared region unless the image was already fully
  // cleared.
  if (!dest_shared_image->IsCleared()) {
    dest_shared_image->SetClearedRect(new_cleared_rect);
  }
}
2027
// Copies a width x height rect from (x, y) in the source shared image to
// (xoffset, yoffset) in the destination on the validating GL path.  Tries a
// GLImage-based upload first and falls back to a blit through the
// CopyTextureCHROMIUM resource manager.  Failures raise a GL error and abort.
void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
    GLint xoffset,
    GLint yoffset,
    GLint x,
    GLint y,
    GLsizei width,
    GLsizei height,
    GLboolean unpack_flip_y,
    const Mailbox& source_mailbox,
    const Mailbox& dest_mailbox) {
  DCHECK(source_mailbox != dest_mailbox);
  DCHECK(shared_context_state_->GrContextIsGL());

  // Resolve GL texture representations for both mailboxes.
  std::unique_ptr<SharedImageRepresentationGLTexture> source_shared_image =
      shared_image_representation_factory_.ProduceGLTexture(source_mailbox);
  std::unique_ptr<SharedImageRepresentationGLTexture> dest_shared_image =
      shared_image_representation_factory_.ProduceGLTexture(dest_mailbox);
  if (!source_shared_image || !dest_shared_image) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
    return;
  }

  // The source must already be cleared (AllowUnclearedAccess::kNo).
  std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
      source_access = source_shared_image->BeginScopedAccess(
          GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
          SharedImageRepresentation::AllowUnclearedAccess::kNo);
  if (!source_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "unable to access source for read");
    return;
  }

  // The requested source rect must lie fully inside the source image.
  gles2::Texture* source_texture = source_shared_image->GetTexture();
  GLenum source_target = source_texture->target();
  DCHECK(source_target);
  GLint source_level = 0;
  gfx::Size source_size = source_shared_image->size();
  gfx::Rect source_rect(x, y, width, height);
  if (!gfx::Rect(source_size).Contains(source_rect)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "source texture bad dimensions.");
    return;
  }

  // Allow uncleared access, as we manually handle clear tracking.
  std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
      dest_access = dest_shared_image->BeginScopedAccess(
          GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
          SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!dest_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "unable to access destination for write");
    return;
  }

  // The destination rect must lie fully inside the destination image.
  gles2::Texture* dest_texture = dest_shared_image->GetTexture();
  GLenum dest_target = dest_texture->target();
  DCHECK(dest_target);
  GLint dest_level = 0;
  gfx::Size dest_size = dest_shared_image->size();
  gfx::Rect dest_rect(xoffset, yoffset, width, height);
  if (!gfx::Rect(dest_size).Contains(dest_rect)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "destination texture bad dimensions.");
    return;
  }

  DCHECK_NE(source_texture->service_id(), dest_texture->service_id());

  // Fetch level type/format for both textures; used to validate the copy and
  // to pick the copy method below.
  GLenum source_type = 0;
  GLenum source_internal_format = 0;
  source_texture->GetLevelType(source_target, source_level, &source_type,
                               &source_internal_format);

  GLenum dest_type = 0;
  GLenum dest_internal_format = 0;
  bool dest_level_defined = dest_texture->GetLevelType(
      dest_target, dest_level, &dest_type, &dest_internal_format);
  DCHECK(dest_level_defined);

  // TODO(piman): Do we need this check? It might always be true by
  // construction.
  std::string output_error_msg;
  if (!ValidateCopyTextureCHROMIUMInternalFormats(
          GetFeatureInfo(), source_internal_format, dest_internal_format,
          &output_error_msg)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
                       output_error_msg.c_str());
    return;
  }

  // Clear the source texture if necessary.
  if (!gles2::TextureManager::ClearTextureLevel(this, source_texture,
                                                source_target, 0 /* level */)) {
    LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
                       "source texture dimensions too big");
    return;
  }

  // Only copies whose destination rect merges with the already-cleared region
  // into one rect are supported; anything else would leave an uncleared gap.
  // NOTE(review): |old_cleared_rect| and the first argument passed to
  // CombineAdjacentRects() are the same value; the second
  // GetLevelClearedRect() call looks redundant — confirm before simplifying.
  gfx::Rect new_cleared_rect;
  gfx::Rect old_cleared_rect =
      dest_texture->GetLevelClearedRect(dest_target, dest_level);
  if (gles2::TextureManager::CombineAdjacentRects(
          dest_texture->GetLevelClearedRect(dest_target, dest_level), dest_rect,
          &new_cleared_rect)) {
    DCHECK(old_cleared_rect.IsEmpty() ||
           new_cleared_rect.Contains(old_cleared_rect));
  } else {
    // No users of RasterDecoder leverage this functionality. Clearing uncleared
    // regions could be added here if needed.
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "Cannot clear non-combineable rects.");
    return;
  }

  ScopedTextureBinder binder(state(), dest_target, dest_texture->service_id(),
                             gr_context());

  // Fast path: when the source level is backed by a GLImage, first try to
  // copy by uploading directly into the destination, and otherwise make sure
  // the image's contents are bound/copied into the source texture before the
  // generic blit below.
  gles2::Texture::ImageState image_state;
  if (gl::GLImage* image =
          source_texture->GetLevelImage(source_target, 0, &image_state)) {
    base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
    if (image->GetType() == gl::GLImage::Type::MEMORY &&
        shared_context_state_->need_context_state_reset()) {
      // If the image is in shared memory, we may need upload the pixel data
      // with SubTexImage2D, so we need reset pixel unpack state if gl context
      // state has been touched by skia.
      pixel_unpack_state.emplace(state(), gr_context(), feature_info());
    }

    // Try to copy by uploading to the destination texture.
    if (dest_internal_format == source_internal_format) {
      if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
                                 gfx::Rect(x, y, width, height))) {
        dest_texture->SetLevelClearedRect(dest_target, dest_level,
                                          new_cleared_rect);
        return;
      }
    }

    // Otherwise, update the source if needed.
    if (image_state == gles2::Texture::UNBOUND) {
      ScopedGLErrorSuppressor suppressor(
          "RasterDecoderImpl::DoCopySubTextureINTERNAL", error_state_.get());
      api()->glBindTextureFn(source_target, source_texture->service_id());
      if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
        bool rv = image->BindTexImage(source_target);
        DCHECK(rv) << "BindTexImage() failed";
        image_state = gles2::Texture::BOUND;
      } else {
        bool rv = image->CopyTexImage(source_target);
        DCHECK(rv) << "CopyTexImage() failed";
        image_state = gles2::Texture::COPIED;
      }
      source_texture->SetLevelImageState(source_target, 0, image_state);
    }
  }

  // Generic path: blit via the lazily-created CopyTextureCHROMIUM manager.
  if (!InitializeCopyTextureCHROMIUM())
    return;

  gles2::CopyTextureMethod method = GetCopyTextureCHROMIUMMethod(
      GetFeatureInfo(), source_target, source_level, source_internal_format,
      source_type, dest_target, dest_level, dest_internal_format, unpack_flip_y,
      NeedsUnpackPremultiplyAlpha(*source_shared_image),
      false /* unpack_unmultiply_alpha */, false /* dither */);
#if BUILDFLAG(IS_ASH) && defined(ARCH_CPU_X86_FAMILY)
  // glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
  // although opposite in Android.
  // TODO(dshwang): After Mesa fixes this issue, remove this hack.
  // https://bugs.freedesktop.org/show_bug.cgi?id=98478,
  // https://crbug.com/535198.
  if (gles2::Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format,
                                      dest_texture->IsImmutable()) &&
      method == gles2::CopyTextureMethod::DIRECT_COPY) {
    method = gles2::CopyTextureMethod::DIRECT_DRAW;
  }
#endif

  in_copy_sub_texture_ = true;
  copy_texture_chromium_->DoCopySubTexture(
      this, source_target, source_texture->service_id(), source_level,
      source_internal_format, dest_target, dest_texture->service_id(),
      dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height,
      dest_size.width(), dest_size.height(), source_size.width(),
      source_size.height(), unpack_flip_y,
      NeedsUnpackPremultiplyAlpha(*source_shared_image),
      false /* unpack_unmultiply_alpha */, false /* dither */, method,
      copy_tex_image_blit_.get());
  dest_texture->SetLevelClearedRect(dest_target, dest_level, new_cleared_rect);
  in_copy_sub_texture_ = false;
  // |reset_texture_state_| is set elsewhere in this decoder while the copy is
  // in flight when texture state was clobbered; restore the sampler
  // parameters on both textures and pessimistically invalidate Skia's cached
  // view of the GL state.
  if (reset_texture_state_) {
    reset_texture_state_ = false;
    for (auto* texture : {source_texture, dest_texture}) {
      GLenum target = texture->target();
      api()->glBindTextureFn(target, texture->service_id());
      api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, texture->wrap_s());
      api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, texture->wrap_t());
      api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
                               texture->min_filter());
      api()->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER,
                               texture->mag_filter());
    }
    shared_context_state_->PessimisticallyResetGrContext();
  }
}
2234
// Copies a width x height rect from (x, y) in the source shared image to
// (xoffset, yoffset) in the destination using Skia, for configurations where
// the GrContext is not GL-backed: the source is wrapped as an SkImage and
// drawn into the destination SkSurface with kSrc blending.
void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
    GLint xoffset,
    GLint yoffset,
    GLint x,
    GLint y,
    GLsizei width,
    GLsizei height,
    GLboolean unpack_flip_y,
    const Mailbox& source_mailbox,
    const Mailbox& dest_mailbox) {
  DCHECK(source_mailbox != dest_mailbox);

  // Use Skia to copy texture if raster's gr_context() is not using GL.
  auto source_shared_image = shared_image_representation_factory_.ProduceSkia(
      source_mailbox, shared_context_state_);
  auto dest_shared_image = shared_image_representation_factory_.ProduceSkia(
      dest_mailbox, shared_context_state_);
  if (!source_shared_image || !dest_shared_image) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
    return;
  }

  // Both rects must lie fully inside their respective images.
  gfx::Size source_size = source_shared_image->size();
  gfx::Rect source_rect(x, y, width, height);
  if (!gfx::Rect(source_size).Contains(source_rect)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "source texture bad dimensions.");
    return;
  }

  gfx::Size dest_size = dest_shared_image->size();
  gfx::Rect dest_rect(xoffset, yoffset, width, height);
  if (!gfx::Rect(dest_size).Contains(dest_rect)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "destination texture bad dimensions.");
    return;
  }

  // Semaphores the accesses below require us to wait on / signal.
  std::vector<GrBackendSemaphore> begin_semaphores;
  std::vector<GrBackendSemaphore> end_semaphores;

  // Allow uncleared access, as we manually handle clear tracking.
  std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
      dest_scoped_access = dest_shared_image->BeginScopedWriteAccess(
          &begin_semaphores, &end_semaphores,
          SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!dest_scoped_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "Dest shared image is not writable");
    return;
  }

  // Only copies whose destination rect merges with the already-cleared region
  // into one rect are supported; anything else would leave an uncleared gap.
  gfx::Rect new_cleared_rect;
  gfx::Rect old_cleared_rect = dest_shared_image->ClearedRect();
  if (gles2::TextureManager::CombineAdjacentRects(old_cleared_rect, dest_rect,
                                                  &new_cleared_rect)) {
    DCHECK(old_cleared_rect.IsEmpty() ||
           new_cleared_rect.Contains(old_cleared_rect));
  } else {
    // No users of RasterDecoder leverage this functionality. Clearing uncleared
    // regions could be added here if needed.
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "Cannot clear non-combineable rects.");
    return;
  }

  // With OneCopyRasterBufferProvider, source_shared_image->BeginReadAccess()
  // will copy pixels from SHM GMB to the texture in |source_shared_image|,
  // and then use drawImageRect() to draw that texure to the target
  // |dest_shared_image|. We can save one copy by drawing the SHM GMB to the
  // target |dest_shared_image| directly.
  // TODO(penghuang): get rid of the one extra copy. https://crbug.com/984045
  std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
      source_scoped_access = source_shared_image->BeginScopedReadAccess(
          &begin_semaphores, &end_semaphores);

  // Wait on begin semaphores even if the read access failed, so the
  // flush/submit at the end stays balanced.
  if (!begin_semaphores.empty()) {
    bool result = dest_scoped_access->surface()->wait(
        begin_semaphores.size(), begin_semaphores.data(),
        /*deleteSemaphoresAfterWait=*/false);
    DCHECK(result);
  }

  if (!source_scoped_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                       "Source shared image is not accessable");
  } else {
    auto source_image = source_scoped_access->CreateSkImage(
        shared_context_state_->gr_context());
    if (!source_image) {
      LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
                         "Couldn't create SkImage from source shared image.");
    }
    // NOTE(review): when CreateSkImage() fails, |source_image| is null and
    // execution still reaches drawImageRect() below; this relies on SkCanvas
    // treating a null image as a no-op — confirm.

    auto* canvas = dest_scoped_access->surface()->getCanvas();
    SkPaint paint;
    // Flipping is expressed as a canvas transform so the same draw call
    // handles both orientations.
    if (unpack_flip_y) {
      canvas->scale(1, -1);
      canvas->translate(0, -height);
    }
    paint.setBlendMode(SkBlendMode::kSrc);
    canvas->drawImageRect(source_image, gfx::RectToSkRect(source_rect),
                          gfx::RectToSkRect(dest_rect), &paint);
  }

  // Signal end semaphores / flush, then update clear tracking.
  FlushAndSubmitIfNecessary(dest_scoped_access->surface(),
                            std::move(end_semaphores));
  if (!dest_shared_image->IsCleared()) {
    dest_shared_image->SetClearedRect(new_cleared_rect);
  }
}
2346
DoWritePixelsINTERNAL(GLint x_offset,GLint y_offset,GLuint src_width,GLuint src_height,GLuint row_bytes,GLuint src_sk_color_type,GLuint src_sk_alpha_type,GLint shm_id,GLuint shm_offset,GLuint pixels_offset,const volatile GLbyte * mailbox)2347 void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
2348 GLint y_offset,
2349 GLuint src_width,
2350 GLuint src_height,
2351 GLuint row_bytes,
2352 GLuint src_sk_color_type,
2353 GLuint src_sk_alpha_type,
2354 GLint shm_id,
2355 GLuint shm_offset,
2356 GLuint pixels_offset,
2357 const volatile GLbyte* mailbox) {
2358 if (src_sk_color_type > kLastEnum_SkColorType) {
2359 LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "WritePixels",
2360 "src_sk_color_type must be a valid SkColorType");
2361 return;
2362 }
2363 if (src_sk_alpha_type > kLastEnum_SkAlphaType) {
2364 LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "WritePixels",
2365 "src_sk_alpha_type must be a valid SkAlphaType");
2366 return;
2367 }
2368
2369 Mailbox dest_mailbox = Mailbox::FromVolatile(
2370 *reinterpret_cast<const volatile Mailbox*>(mailbox));
2371 DLOG_IF(ERROR, !dest_mailbox.Verify())
2372 << "WritePixels was passed an invalid mailbox";
2373 auto dest_shared_image = shared_image_representation_factory_.ProduceSkia(
2374 dest_mailbox, shared_context_state_);
2375 if (!dest_shared_image) {
2376 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2377 "Attempting to write to unknown mailbox.");
2378 return;
2379 }
2380
2381 if (SkColorTypeBytesPerPixel(viz::ResourceFormatToClosestSkColorType(
2382 true, dest_shared_image->format())) !=
2383 SkColorTypeBytesPerPixel(static_cast<SkColorType>(src_sk_color_type))) {
2384 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2385 "Bytes per pixel for src SkColorType and dst "
2386 "SkColorType must be the same.");
2387 return;
2388 }
2389
2390 // If present, the color space is serialized into shared memory before the
2391 // pixel data.
2392 sk_sp<SkColorSpace> color_space;
2393 if (pixels_offset > 0) {
2394 void* color_space_bytes =
2395 GetSharedMemoryAs<void*>(shm_id, shm_offset, pixels_offset);
2396 if (!color_space_bytes) {
2397 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2398 "Failed to retrieve serialized SkColorSpace.");
2399 return;
2400 }
2401
2402 color_space = SkColorSpace::Deserialize(color_space_bytes, pixels_offset);
2403 if (!color_space) {
2404 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2405 "Failed to deserialize expected SkColorSpace");
2406 return;
2407 }
2408 }
2409
2410 SkImageInfo src_info = SkImageInfo::Make(
2411 src_width, src_height, static_cast<SkColorType>(src_sk_color_type),
2412 static_cast<SkAlphaType>(src_sk_alpha_type), std::move(color_space));
2413
2414 if (row_bytes < src_info.minRowBytes()) {
2415 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glWritePixels",
2416 "row_bytes be >= "
2417 "SkImageInfo::minRowBytes() for source image.");
2418 return;
2419 }
2420
2421 std::vector<GrBackendSemaphore> begin_semaphores;
2422 std::vector<GrBackendSemaphore> end_semaphores;
2423
2424 // Allow uncleared access, as we manually handle clear tracking.
2425 std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
2426 dest_scoped_access = dest_shared_image->BeginScopedWriteAccess(
2427 &begin_semaphores, &end_semaphores,
2428 SharedImageRepresentation::AllowUnclearedAccess::kYes);
2429 if (!dest_scoped_access) {
2430 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glWritePixels",
2431 "Dest shared image is not writable");
2432 return;
2433 }
2434
2435 if (!begin_semaphores.empty()) {
2436 bool result = dest_scoped_access->surface()->wait(
2437 begin_semaphores.size(), begin_semaphores.data(),
2438 /*deleteSemaphoresAfterWait=*/false);
2439 if (!result) {
2440 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2441 "Unable to obtain write access to dest shared image.");
2442 return;
2443 }
2444 }
2445
2446 size_t byte_size = src_info.computeByteSize(row_bytes);
2447 if (byte_size > UINT32_MAX) {
2448 LOCAL_SET_GL_ERROR(
2449 GL_INVALID_VALUE, "glWritePixels",
2450 "Cannot request a memory chunk larger than UINT32_MAX bytes");
2451 return;
2452 }
2453
2454 // The pixels are stored after the serialized SkColorSpace + padding
2455 void* pixel_data =
2456 GetSharedMemoryAs<void*>(shm_id, shm_offset + pixels_offset, byte_size);
2457 if (!pixel_data) {
2458 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2459 "Couldn't retrieve pixel data.");
2460 return;
2461 }
2462 auto* canvas = dest_scoped_access->surface()->getCanvas();
2463 bool written =
2464 canvas->writePixels(src_info, pixel_data, row_bytes, x_offset, y_offset);
2465 if (!written) {
2466 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
2467 "Failed to write pixels to SkCanvas");
2468 }
2469
2470 FlushAndSubmitIfNecessary(dest_scoped_access->surface(),
2471 std::move(end_semaphores));
2472 if (!dest_shared_image->IsCleared()) {
2473 dest_shared_image->SetClearedRect(
2474 gfx::Rect(x_offset, y_offset, src_width, src_height));
2475 }
2476 }
2477
DoReadbackImagePixelsINTERNAL(GLint src_x,GLint src_y,GLuint dst_width,GLuint dst_height,GLuint row_bytes,GLuint dst_sk_color_type,GLuint dst_sk_alpha_type,GLint shm_id,GLuint shm_offset,GLuint pixels_offset,const volatile GLbyte * mailbox)2478 void RasterDecoderImpl::DoReadbackImagePixelsINTERNAL(
2479 GLint src_x,
2480 GLint src_y,
2481 GLuint dst_width,
2482 GLuint dst_height,
2483 GLuint row_bytes,
2484 GLuint dst_sk_color_type,
2485 GLuint dst_sk_alpha_type,
2486 GLint shm_id,
2487 GLuint shm_offset,
2488 GLuint pixels_offset,
2489 const volatile GLbyte* mailbox) {
2490 if (dst_sk_color_type > kLastEnum_SkColorType) {
2491 LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "ReadbackImagePixels",
2492 "dst_sk_color_type must be a valid SkColorType");
2493 return;
2494 }
2495 if (dst_sk_alpha_type > kLastEnum_SkAlphaType) {
2496 LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "ReadbackImagePixels",
2497 "dst_sk_alpha_type must be a valid SkAlphaType");
2498 return;
2499 }
2500
2501 Mailbox source_mailbox = Mailbox::FromVolatile(
2502 *reinterpret_cast<const volatile Mailbox*>(mailbox));
2503 DLOG_IF(ERROR, !source_mailbox.Verify())
2504 << "ReadbackImagePixels was passed an invalid mailbox";
2505 auto source_shared_image = shared_image_representation_factory_.ProduceSkia(
2506 source_mailbox, shared_context_state_);
2507 if (!source_shared_image) {
2508 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadbackImagePixels",
2509 "Unknown mailbox");
2510 return;
2511 }
2512
2513 // If present, the color space is serialized into shared memory before the
2514 // pixel data.
2515 sk_sp<SkColorSpace> dst_color_space;
2516 if (pixels_offset > 0) {
2517 void* color_space_bytes =
2518 GetSharedMemoryAs<void*>(shm_id, shm_offset, pixels_offset);
2519 if (!color_space_bytes) {
2520 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
2521 "Failed to retrieve serialized SkColorSpace.");
2522 return;
2523 }
2524 dst_color_space =
2525 SkColorSpace::Deserialize(color_space_bytes, pixels_offset);
2526 if (!dst_color_space) {
2527 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
2528 "Failed to deserialize expected SkColorSpace");
2529 return;
2530 }
2531 }
2532
2533 SkImageInfo dst_info = SkImageInfo::Make(
2534 dst_width, dst_height, static_cast<SkColorType>(dst_sk_color_type),
2535 static_cast<SkAlphaType>(dst_sk_alpha_type), std::move(dst_color_space));
2536
2537 if (row_bytes < dst_info.minRowBytes()) {
2538 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadbackImagePixels",
2539 "row_bytes be >= "
2540 "SkImageInfo::minRowBytes() for dest image.");
2541 return;
2542 }
2543
2544 std::vector<GrBackendSemaphore> begin_semaphores;
2545
2546 std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
2547 source_scoped_access = source_shared_image->BeginScopedReadAccess(
2548 &begin_semaphores, nullptr);
2549
2550 if (!begin_semaphores.empty()) {
2551 bool result = shared_context_state_->gr_context()->wait(
2552 begin_semaphores.size(), begin_semaphores.data(),
2553 /*deleteSemaphoresAfterWait=*/false);
2554 DCHECK(result);
2555 }
2556
2557 if (!source_scoped_access) {
2558 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadbackImagePixels",
2559 "Source shared image is not accessible");
2560 return;
2561 }
2562
2563 auto sk_image =
2564 source_scoped_access->CreateSkImage(shared_context_state_->gr_context());
2565 if (!sk_image) {
2566 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
2567 "Couldn't create SkImage for reading.");
2568 return;
2569 }
2570
2571 size_t byte_size = dst_info.computeByteSize(row_bytes);
2572 if (byte_size > UINT32_MAX) {
2573 LOCAL_SET_GL_ERROR(
2574 GL_INVALID_VALUE, "glReadbackImagePixels",
2575 "Cannot request a memory chunk larger than UINT32_MAX bytes");
2576 return;
2577 }
2578
2579 void* shm_address =
2580 GetSharedMemoryAs<void*>(shm_id, shm_offset + pixels_offset, byte_size);
2581 if (!shm_address) {
2582 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
2583 "Failed to retrieve memory for readPixels");
2584 return;
2585 }
2586
2587 bool success =
2588 sk_image->readPixels(dst_info, shm_address, row_bytes, src_x, src_y);
2589 if (!success) {
2590 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
2591 "Failed to read pixels from SkImage");
2592 }
2593 }
2594
2595 namespace {
2596 // Helper class for mailbox index iteration that handles NV12 images which have
2597 // no separate V plane mailbox.
2598 class YUVConversionMailboxIndex {
2599 public:
YUVConversionMailboxIndex(bool is_nv12)2600 explicit YUVConversionMailboxIndex(bool is_nv12)
2601 : is_nv12_(is_nv12), cur_index_(kYIndex) {}
2602 ~YUVConversionMailboxIndex() = default;
2603
operator ++()2604 YUVConversionMailboxIndex& operator++() {
2605 cur_index_++;
2606 if (cur_index_ == kVIndex && is_nv12_)
2607 cur_index_++;
2608 return *this;
2609 }
2610
operator ()()2611 size_t operator()() { return cur_index_; }
2612
reset()2613 void reset() { cur_index_ = kYIndex; }
2614
2615 enum Index : size_t {
2616 kYIndex = 0,
2617 kUIndex = 1,
2618 kVIndex = 2,
2619 kDestIndex = 3,
2620 };
2621
ToString()2622 std::string ToString() {
2623 switch (cur_index_) {
2624 case YUVConversionMailboxIndex::kYIndex:
2625 return "Y Plane";
2626 case YUVConversionMailboxIndex::kUIndex:
2627 return is_nv12_ ? "UV Plane" : "U Plane";
2628 case YUVConversionMailboxIndex::kVIndex:
2629 DCHECK(!is_nv12_);
2630 return "V Plane";
2631 case YUVConversionMailboxIndex::kDestIndex:
2632 return "Destination";
2633 default:
2634 return "Invalid mailbox index";
2635 }
2636 }
2637
2638 static constexpr size_t kNumInputMailboxes =
2639 YUVConversionMailboxIndex::kVIndex + 1;
2640 static constexpr size_t kTotalMailboxes =
2641 YUVConversionMailboxIndex::kDestIndex + 1;
2642
2643 private:
2644 bool is_nv12_;
2645 size_t cur_index_;
2646 };
2647
2648 } // namespace
2649
void RasterDecoderImpl::DoConvertYUVMailboxesToRGBINTERNAL(
    GLenum planes_yuv_color_space,
    GLboolean is_nv12,
    const volatile GLbyte* mailboxes_in) {
  // Converts a set of YUV plane shared images into the RGB destination shared
  // image by building a Skia YUVA image from the plane textures and drawing
  // it into the destination surface. |mailboxes_in| carries the plane
  // mailboxes followed by the destination mailbox; for NV12 the V slot is
  // unused (the U slot holds the interleaved UV plane).

  // |planes_yuv_color_space| is an untrusted client value; validate before
  // casting to SkYUVColorSpace.
  if (planes_yuv_color_space > kLastEnum_SkYUVColorSpace) {
    LOCAL_SET_GL_ERROR(
        GL_INVALID_ENUM, "glConvertYUVMailboxesToRGB",
        "planes_yuv_color_space must be a valid SkYUVColorSpace");
    return;
  }
  SkYUVColorSpace src_color_space =
      static_cast<SkYUVColorSpace>(planes_yuv_color_space);

  YUVConversionMailboxIndex idx(is_nv12);

  // Mailboxes are sent over in the order y_plane, u_plane, v_plane, destination
  std::array<gpu::Mailbox, YUVConversionMailboxIndex::kTotalMailboxes>
      mailboxes;
  for (idx.reset(); idx() < mailboxes.size(); ++idx) {
    mailboxes[idx()] = Mailbox::FromVolatile(
        reinterpret_cast<const volatile Mailbox*>(mailboxes_in)[idx()]);
    DLOG_IF(ERROR, !mailboxes[idx()].Verify())
        << "ConvertYUVMailboxesToRGB was "
           "passed an invalid mailbox: "
        << idx.ToString();
  }

  // Resolve every mailbox to a Skia shared-image representation; an unknown
  // mailbox aborts the conversion before any access is begun.
  std::array<std::unique_ptr<SharedImageRepresentationSkia>,
             YUVConversionMailboxIndex::kTotalMailboxes>
      images;
  for (idx.reset(); idx() < images.size(); ++idx) {
    images[idx()] = shared_image_representation_factory_.ProduceSkia(
        mailboxes[idx()], shared_context_state_);
    if (!images[idx()]) {
      LOCAL_SET_GL_ERROR(
          GL_INVALID_OPERATION, "glConvertYUVMailboxesToRGB",
          ("Attempting to operate on unknown mailbox:" + idx.ToString())
              .c_str());
      return;
    }
  }

  std::vector<GrBackendSemaphore> begin_semaphores;
  std::vector<GrBackendSemaphore> end_semaphores;

  // Uncleared access is allowed: a successful draw covers the destination,
  // and clear tracking is updated manually at the end.
  auto dest_scoped_access =
      images[YUVConversionMailboxIndex::kDestIndex]->BeginScopedWriteAccess(
          &begin_semaphores, &end_semaphores,
          SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!dest_scoped_access) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glConvertYUVMailboxesToRGB",
                       "Destination shared image is not writable");
    DCHECK(begin_semaphores.empty());
    return;
  }

  bool source_access_valid = true;
  std::array<std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>,
             YUVConversionMailboxIndex::kNumInputMailboxes>
      source_scoped_access;
  for (idx.reset(); idx() < source_scoped_access.size(); ++idx) {
    source_scoped_access[idx()] = images[idx()]->BeginScopedReadAccess(
        &begin_semaphores, &end_semaphores);

    if (!source_scoped_access[idx()]) {
      LOCAL_SET_GL_ERROR(
          GL_INVALID_OPERATION, "glConvertYUVMailboxesToRGB",
          ("Couldn't access shared image for mailbox:" + idx.ToString())
              .c_str());
      // Don't return here: destination write access has already begun, so
      // fall through to the flush below so |end_semaphores| are signaled.
      source_access_valid = false;
      break;
    }
  }

  auto* dest_surface = dest_scoped_access->surface();
  // Wait on all semaphores accumulated while beginning the accesses before
  // any drawing touches the surfaces.
  if (!begin_semaphores.empty()) {
    bool result =
        dest_surface->wait(begin_semaphores.size(), begin_semaphores.data(),
                           /*deleteSemaphoresAfterWait=*/false);
    DCHECK(result);
  }

  bool drew_image = false;
  if (source_access_valid) {
    // For NV12, |idx| skips the V slot, leaving that texture default-valued;
    // the kY_UV_420 config below only consumes two textures.
    std::array<GrBackendTexture, YUVConversionMailboxIndex::kNumInputMailboxes>
        yuva_textures;
    for (idx.reset(); idx() < yuva_textures.size(); ++idx) {
      yuva_textures[idx()] = source_scoped_access[idx()]
                                 ->promise_image_texture()
                                 ->backendTexture();
    }

    // NOTE(review): assumes 4:2:0 subsampled planes sized to match the
    // destination surface -- confirm against the client-side raster API.
    SkISize dest_size =
        SkISize::Make(dest_surface->width(), dest_surface->height());
    SkYUVAInfo::PlanarConfig planar_config =
        is_nv12 ? SkYUVAInfo::PlanarConfig::kY_UV_420
                : SkYUVAInfo::PlanarConfig::kY_U_V_420;
    SkYUVAInfo yuva_info(dest_size, planar_config, src_color_space);
    GrYUVABackendTextures yuva_backend_textures(yuva_info, yuva_textures.data(),
                                                kTopLeft_GrSurfaceOrigin);
    auto result_image =
        SkImage::MakeFromYUVATextures(gr_context(), yuva_backend_textures);
    if (!result_image) {
      LOCAL_SET_GL_ERROR(
          GL_INVALID_OPERATION, "glConvertYUVMailboxesToRGB",
          "Couldn't create destination images from provided sources");
    } else {
      dest_surface->getCanvas()->drawImage(result_image, 0, 0);
      drew_image = true;
    }
  }

  // Always flush so |end_semaphores| are signaled, even on failure paths.
  FlushAndSubmitIfNecessary(dest_scoped_access->surface(),
                            std::move(end_semaphores));
  // The draw covers the whole destination, so mark it cleared on success.
  if (!images[YUVConversionMailboxIndex::kDestIndex]->IsCleared() &&
      drew_image) {
    images[YUVConversionMailboxIndex::kDestIndex]->SetCleared();
  }
}
2769
2770 namespace {
2771
2772 // Helper to read client data from transfer cache.
2773 class TransferCacheDeserializeHelperImpl final
2774 : public cc::TransferCacheDeserializeHelper {
2775 public:
TransferCacheDeserializeHelperImpl(int raster_decoder_id,ServiceTransferCache * transfer_cache)2776 explicit TransferCacheDeserializeHelperImpl(
2777 int raster_decoder_id,
2778 ServiceTransferCache* transfer_cache)
2779 : raster_decoder_id_(raster_decoder_id), transfer_cache_(transfer_cache) {
2780 DCHECK(transfer_cache_);
2781 }
2782 ~TransferCacheDeserializeHelperImpl() override = default;
2783
CreateLocalEntry(uint32_t id,std::unique_ptr<cc::ServiceTransferCacheEntry> entry)2784 void CreateLocalEntry(
2785 uint32_t id,
2786 std::unique_ptr<cc::ServiceTransferCacheEntry> entry) override {
2787 auto type = entry->Type();
2788 transfer_cache_->CreateLocalEntry(
2789 ServiceTransferCache::EntryKey(raster_decoder_id_, type, id),
2790 std::move(entry));
2791 }
2792
2793 private:
GetEntryInternal(cc::TransferCacheEntryType entry_type,uint32_t entry_id)2794 cc::ServiceTransferCacheEntry* GetEntryInternal(
2795 cc::TransferCacheEntryType entry_type,
2796 uint32_t entry_id) override {
2797 return transfer_cache_->GetEntry(ServiceTransferCache::EntryKey(
2798 raster_decoder_id_, entry_type, entry_id));
2799 }
2800
2801 const int raster_decoder_id_;
2802 ServiceTransferCache* const transfer_cache_;
2803
2804 DISALLOW_COPY_AND_ASSIGN(TransferCacheDeserializeHelperImpl);
2805 };
2806
2807 } // namespace
2808
DeletePaintCacheTextBlobsINTERNALHelper(GLsizei n,const volatile GLuint * paint_cache_ids)2809 void RasterDecoderImpl::DeletePaintCacheTextBlobsINTERNALHelper(
2810 GLsizei n,
2811 const volatile GLuint* paint_cache_ids) {
2812 if (!supports_oop_raster_) {
2813 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
2814 "glDeletePaintCacheEntriesINTERNAL",
2815 "No chromium raster support");
2816 return;
2817 }
2818
2819 paint_cache_->Purge(cc::PaintCacheDataType::kTextBlob, n, paint_cache_ids);
2820 }
2821
DeletePaintCachePathsINTERNALHelper(GLsizei n,const volatile GLuint * paint_cache_ids)2822 void RasterDecoderImpl::DeletePaintCachePathsINTERNALHelper(
2823 GLsizei n,
2824 const volatile GLuint* paint_cache_ids) {
2825 if (!supports_oop_raster_) {
2826 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
2827 "glDeletePaintCacheEntriesINTERNAL",
2828 "No chromium raster support");
2829 return;
2830 }
2831
2832 paint_cache_->Purge(cc::PaintCacheDataType::kPath, n, paint_cache_ids);
2833 }
2834
DoClearPaintCacheINTERNAL()2835 void RasterDecoderImpl::DoClearPaintCacheINTERNAL() {
2836 if (!supports_oop_raster_) {
2837 LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glClearPaintCacheINTERNAL",
2838 "No chromium raster support");
2839 return;
2840 }
2841
2842 paint_cache_->PurgeAll();
2843 }
2844
void RasterDecoderImpl::DoBeginRasterCHROMIUM(GLuint sk_color,
                                              GLuint msaa_sample_count,
                                              GLboolean can_use_lcd_text,
                                              const volatile GLbyte* key) {
  // Begins an out-of-process raster pass targeting the shared image named by
  // the mailbox in |key|: acquires write access, sets up |sk_surface_| and
  // |raster_canvas_| (a DDL recorder canvas when |use_ddl_|), and clears
  // uncleared images to |sk_color|. Paired with DoEndRasterCHROMIUM.

  // Workaround for https://crbug.com/906453: Flush before BeginRaster (the
  // commands between BeginRaster and EndRaster will not flush).
  FlushToWorkAroundMacCrashes();

  if (!gr_context() || !supports_oop_raster_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
                       "chromium_raster_transport not enabled via attribs");
    return;
  }
  // A live |sk_surface_| means the previous raster pass was never ended.
  if (sk_surface_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
                       "BeginRasterCHROMIUM without EndRasterCHROMIUM");
    return;
  }

  Mailbox mailbox =
      Mailbox::FromVolatile(*reinterpret_cast<const volatile Mailbox*>(key));
  DLOG_IF(ERROR, !mailbox.Verify()) << "BeginRasterCHROMIUM was "
                                       "passed a mailbox that was not "
                                       "generated by ProduceTextureCHROMIUM.";

  DCHECK(!shared_image_);
  shared_image_ = shared_image_representation_factory_.ProduceSkia(
      mailbox, shared_context_state_.get());
  if (!shared_image_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glBeginRasterCHROMIUM",
                       "passed invalid mailbox.");
    return;
  }

  DCHECK(locked_handles_.empty());
  DCHECK(!raster_canvas_);
  shared_context_state_->set_need_context_state_reset(true);

  // Use unknown pixel geometry to disable LCD text.
  uint32_t flags = 0;
  SkSurfaceProps surface_props(flags, kUnknown_SkPixelGeometry);
  if (can_use_lcd_text) {
    surface_props = skia::LegacyDisplayGlobals::GetSkSurfaceProps(flags);
  }

  SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
      /*gpu_compositing=*/true, shared_image_->format());
  // If we can't match requested MSAA samples, don't use MSAA.
  int final_msaa_count = std::max(static_cast<int>(msaa_sample_count), 0);
  if (final_msaa_count >
      gr_context()->maxSurfaceSampleCountForColorType(sk_color_type))
    final_msaa_count = 0;

  std::vector<GrBackendSemaphore> begin_semaphores;
  DCHECK(end_semaphores_.empty());
  DCHECK(!scoped_shared_image_write_);
  // Allow uncleared access, as raster specifically handles uncleared images
  // by clearing them before writing.
  scoped_shared_image_write_ = shared_image_->BeginScopedWriteAccess(
      final_msaa_count, surface_props, &begin_semaphores, &end_semaphores_,
      SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!scoped_shared_image_write_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
                       "failed to create surface");
    shared_image_.reset();
    return;
  }

  sk_surface_ = scoped_shared_image_write_->surface();

  // Wait on any semaphores the backing requires before writing to it.
  if (!begin_semaphores.empty()) {
    bool result =
        sk_surface_->wait(begin_semaphores.size(), begin_semaphores.data(),
                          /*deleteSemaphoresAfterWait=*/false);
    DCHECK(result);
  }

  if (use_ddl_) {
    // Record into a deferred display list rather than drawing directly;
    // the DDL is replayed in DoEndRasterCHROMIUM.
    SkSurfaceCharacterization characterization;
    bool result = sk_surface_->characterize(&characterization);
    DCHECK(result) << "Failed to characterize raster SkSurface.";
    recorder_ =
        std::make_unique<SkDeferredDisplayListRecorder>(characterization);
    raster_canvas_ = recorder_->getCanvas();
  } else {
    raster_canvas_ = sk_surface_->getCanvas();
  }

  // Provider used by paint ops that reference other shared images; any
  // semaphores it accumulates are signaled with this pass's flush.
  paint_op_shared_image_provider_ = std::make_unique<SharedImageProviderImpl>(
      &shared_image_representation_factory_, shared_context_state_, sk_surface_,
      &end_semaphores_, error_state_.get());

  // All or nothing clearing, as no way to validate the client's input on what
  // is the "used" part of the texture.
  // TODO(enne): This doesn't handle the case where the background color
  // changes and so any extra pixels outside the raster area that get
  // sampled may be incorrect.
  if (shared_image_->IsCleared())
    return;

  raster_canvas_->drawColor(sk_color);
  shared_image_->SetCleared();
}
2948
GetShmBuffer(uint32_t shm_id)2949 scoped_refptr<Buffer> RasterDecoderImpl::GetShmBuffer(uint32_t shm_id) {
2950 return GetSharedMemoryBuffer(shm_id);
2951 }
2952
ReportProgress()2953 void RasterDecoderImpl::ReportProgress() {
2954 if (shared_context_state_->progress_reporter())
2955 shared_context_state_->progress_reporter()->ReportProgress();
2956 }
2957
void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
                                         GLuint raster_shm_offset,
                                         GLuint raster_shm_size,
                                         GLuint font_shm_id,
                                         GLuint font_shm_offset,
                                         GLuint font_shm_size) {
  // Plays back a client-serialized paint-op stream onto |raster_canvas_|.
  // Font data, if any, is deserialized first so glyphs are available to the
  // ops. Must be called between BeginRasterCHROMIUM and EndRasterCHROMIUM.
  TRACE_EVENT1("gpu", "RasterDecoderImpl::DoRasterCHROMIUM", "raster_id",
               ++raster_chromium_id_);

  if (!sk_surface_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glRasterCHROMIUM",
                       "RasterCHROMIUM without BeginRasterCHROMIUM");
    return;
  }
  DCHECK(transfer_cache());
  shared_context_state_->set_need_context_state_reset(true);

  if (font_shm_size > 0) {
    // Deserialize fonts before raster.
    volatile char* font_buffer_memory =
        GetSharedMemoryAs<char*>(font_shm_id, font_shm_offset, font_shm_size);
    if (!font_buffer_memory) {
      LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
                         "Can not read font buffer.");
      return;
    }

    std::vector<SkDiscardableHandleId> new_locked_handles;
    if (!font_manager_->Deserialize(font_buffer_memory, font_shm_size,
                                    &new_locked_handles)) {
      LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
                         "Invalid font buffer.");
      return;
    }
    // Remember the handles locked here; DoEndRasterCHROMIUM unlocks them
    // after the surface flush.
    locked_handles_.insert(locked_handles_.end(), new_locked_handles.begin(),
                           new_locked_handles.end());
  }

  char* paint_buffer_memory = GetSharedMemoryAs<char*>(
      raster_shm_id, raster_shm_offset, raster_shm_size);
  if (!paint_buffer_memory) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
                       "Can not read paint buffer.");
    return;
  }

  // Stack storage large and aligned enough for any single deserialized op.
  alignas(
      cc::PaintOpBuffer::PaintOpAlign) char data[sizeof(cc::LargestPaintOp)];

  cc::PlaybackParams playback_params(nullptr, SkMatrix::I());
  TransferCacheDeserializeHelperImpl impl(raster_decoder_id_, transfer_cache());
  cc::PaintOp::DeserializeOptions options(
      &impl, paint_cache_.get(), font_manager_->strike_client(),
      shared_context_state_->scratch_deserialization_buffer(), is_privileged_,
      paint_op_shared_image_provider_.get());
  options.crash_dump_on_failure =
      !gpu_preferences_.disable_oopr_debug_crash_dump;

  size_t paint_buffer_size = raster_shm_size;
  // Rasterization can be slow; keep the watchdog informed for its duration.
  gl::ScopedProgressReporter report_progress(
      shared_context_state_->progress_reporter());
  // Deserialize and raster one op at a time; |skip| is how many bytes the
  // current op consumed from the buffer.
  while (paint_buffer_size > 0) {
    size_t skip = 0;
    cc::PaintOp* deserialized_op = cc::PaintOp::Deserialize(
        paint_buffer_memory, paint_buffer_size, &data[0],
        sizeof(cc::LargestPaintOp), &skip, options);
    if (!deserialized_op) {
      LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glRasterCHROMIUM",
                         "RasterCHROMIUM: serialization failure");
      return;
    }

    deserialized_op->Raster(raster_canvas_, playback_params);
    deserialized_op->DestroyThis();

    paint_buffer_size -= skip;
    paint_buffer_memory += skip;
  }
}
3037
// Returns true once the deferred display list recorded for the current
// raster pass is ready to draw. On first call, detaches the DDL from the
// recorder and starts a program iterator; each subsequent call compiles
// programs until a new one is actually compiled, then returns false so the
// caller can yield to the scheduler and resume later.
bool RasterDecoderImpl::EnsureDDLReadyForRaster() {
  DCHECK(use_ddl_);
  DCHECK_EQ(current_decoder_error_, error::kNoError);

  if (!ddl_) {
    DCHECK(recorder_);
    DCHECK(!program_iterator_);

    TRACE_EVENT0("gpu",
                 "RasterDecoderImpl::EnsureDDLReadyForRaster::DetachDDL");
    ddl_ = recorder_->detach();
    program_iterator_.emplace(shared_context_state_->gr_context(), ddl_.get());
  }

  while (!program_iterator_->done()) {
    TRACE_EVENT0("gpu",
                 "RasterDecoderImpl::EnsureDDLReadyForRaster::MaybeCompile");
    bool did_compile = program_iterator_->compile();
    // Advance before the early return so the next call resumes with the
    // following program.
    program_iterator_->next();
    if (did_compile)
      return false;
  }

  program_iterator_.reset();
  return true;
}
3064
void RasterDecoderImpl::DoEndRasterCHROMIUM() {
  // Ends the raster pass started by DoBeginRasterCHROMIUM: replays the DDL
  // (when used), flushes the surface, releases write access, and unlocks the
  // font handles accumulated by DoRasterCHROMIUM.
  TRACE_EVENT0("gpu", "RasterDecoderImpl::DoEndRasterCHROMIUM");
  if (!sk_surface_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glEndRasterCHROMIUM",
                       "EndRasterCHROMIUM without BeginRasterCHROMIUM");
    return;
  }

  shared_context_state_->set_need_context_state_reset(true);
  raster_canvas_ = nullptr;

  if (use_ddl_) {
    if (!EnsureDDLReadyForRaster()) {
      // This decoder error indicates that this command has not finished
      // executing. The decoder will yield and re-execute this command when it
      // resumes decoding.
      current_decoder_error_ = error::kDeferCommandUntilLater;
      return;
    }
    TRACE_EVENT0("gpu", "RasterDecoderImpl::DoEndRasterCHROMIUM::DrawDDL");
    sk_surface_->draw(std::move(ddl_));
  }

  {
    TRACE_EVENT0("gpu", "RasterDecoderImpl::DoEndRasterCHROMIUM::Flush");
    // This is a slow operation since skia will execute the GPU work for the
    // complete tile. Make sure the progress reporter is notified to avoid
    // hangs.
    gl::ScopedProgressReporter report_progress(
        shared_context_state_->progress_reporter());
    FlushAndSubmitIfNecessary(sk_surface_, std::move(end_semaphores_));
    end_semaphores_.clear();
  }

  shared_context_state_->UpdateSkiaOwnedMemorySize();
  // Release the per-pass state set up in DoBeginRasterCHROMIUM, in reverse
  // dependency order (surface pointer before the write access owning it).
  sk_surface_ = nullptr;
  scoped_shared_image_write_.reset();
  shared_image_.reset();
  paint_op_shared_image_provider_.reset();

  // Test only path for SetUpForRasterCHROMIUMForTest.
  sk_surface_for_testing_.reset();

  // Unlock all font handles. This needs to be deferred until
  // SkSurface::flush since that flushes batched Gr operations
  // in skia that access the glyph data.
  // TODO(khushalsagar): We just unlocked a bunch of handles, do we need to
  // give a call to skia to attempt to purge any unlocked handles?
  if (!font_manager_->Unlock(locked_handles_)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
                       "Invalid font discardable handle.");
  }
  locked_handles_.clear();

  // We just flushed a tile's worth of GPU work from the SkSurface in
  // flush above. Yield to the Scheduler to allow pre-emption before
  // processing more commands.
  ExitCommandProcessingEarly();
}
3124
void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
    GLuint raw_entry_type,
    GLuint entry_id,
    GLuint handle_shm_id,
    GLuint handle_shm_offset,
    GLuint data_shm_id,
    GLuint data_shm_offset,
    GLuint data_size) {
  // Deserializes client data from shared memory into a locked service-side
  // transfer cache entry keyed by (decoder id, type, entry id). The
  // discardable handle at (handle_shm_id, handle_shm_offset) lets the client
  // manage the entry's lifetime afterwards.
  if (!supports_oop_raster_) {
    LOCAL_SET_GL_ERROR(
        GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
        "Attempt to use OOP transfer cache on a context without OOP raster.");
    return;
  }
  DCHECK(gr_context());
  DCHECK(transfer_cache());

  // Validate the type we are about to create.
  cc::TransferCacheEntryType entry_type;
  if (!cc::ServiceTransferCacheEntry::SafeConvertToType(raw_entry_type,
                                                        &entry_type)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
                       "Attempt to use OOP transfer cache with an invalid "
                       "cache entry type.");
    return;
  }

  // Skottie entries are restricted to privileged channels.
  if (entry_type == cc::TransferCacheEntryType::kSkottie && !is_privileged_) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
                       "Attempt to use skottie on a non privileged channel");
    return;
  }

  uint8_t* data_memory =
      GetSharedMemoryAs<uint8_t*>(data_shm_id, data_shm_offset, data_size);
  if (!data_memory) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
                       "Can not read transfer cache entry data.");
    return;
  }

  scoped_refptr<Buffer> handle_buffer = GetSharedMemoryBuffer(handle_shm_id);
  if (!DiscardableHandleBase::ValidateParameters(handle_buffer.get(),
                                                 handle_shm_offset)) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
                       "Invalid shm for discardable handle.");
    return;
  }
  ServiceDiscardableHandle handle(std::move(handle_buffer), handle_shm_offset,
                                  handle_shm_id);

  // If the entry is going to use skia during deserialization, make sure we
  // mark the context state dirty.
  GrDirectContext* context_for_entry =
      cc::ServiceTransferCacheEntry::UsesGrContext(entry_type) ? gr_context()
                                                               : nullptr;
  if (context_for_entry)
    shared_context_state_->set_need_context_state_reset(true);

  if (!transfer_cache()->CreateLockedEntry(
          ServiceTransferCache::EntryKey(raster_decoder_id_, entry_type,
                                         entry_id),
          handle, context_for_entry, base::make_span(data_memory, data_size))) {
    LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
                       "Failure to deserialize transfer cache entry.");
    return;
  }

  // The only entry using the GrContext are image transfer cache entries for
  // image uploads. Since this tends to a slow operation, yield to allow the
  // decoder to be pre-empted.
  if (context_for_entry)
    ExitCommandProcessingEarly();
}
3199
DoUnlockTransferCacheEntryINTERNAL(GLuint raw_entry_type,GLuint entry_id)3200 void RasterDecoderImpl::DoUnlockTransferCacheEntryINTERNAL(
3201 GLuint raw_entry_type,
3202 GLuint entry_id) {
3203 if (!supports_oop_raster_) {
3204 LOCAL_SET_GL_ERROR(
3205 GL_INVALID_VALUE, "glUnlockTransferCacheEntryINTERNAL",
3206 "Attempt to use OOP transfer cache on a context without OOP raster.");
3207 return;
3208 }
3209 DCHECK(transfer_cache());
3210 cc::TransferCacheEntryType entry_type;
3211 if (!cc::ServiceTransferCacheEntry::SafeConvertToType(raw_entry_type,
3212 &entry_type)) {
3213 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnlockTransferCacheEntryINTERNAL",
3214 "Attempt to use OOP transfer cache with an invalid "
3215 "cache entry type.");
3216 return;
3217 }
3218
3219 if (!transfer_cache()->UnlockEntry(ServiceTransferCache::EntryKey(
3220 raster_decoder_id_, entry_type, entry_id))) {
3221 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnlockTransferCacheEntryINTERNAL",
3222 "Attempt to unlock an invalid ID");
3223 }
3224 }
3225
DoDeleteTransferCacheEntryINTERNAL(GLuint raw_entry_type,GLuint entry_id)3226 void RasterDecoderImpl::DoDeleteTransferCacheEntryINTERNAL(
3227 GLuint raw_entry_type,
3228 GLuint entry_id) {
3229 if (!supports_oop_raster_) {
3230 LOCAL_SET_GL_ERROR(
3231 GL_INVALID_VALUE, "glDeleteTransferCacheEntryINTERNAL",
3232 "Attempt to use OOP transfer cache on a context without OOP raster.");
3233 return;
3234 }
3235 DCHECK(transfer_cache());
3236 cc::TransferCacheEntryType entry_type;
3237 if (!cc::ServiceTransferCacheEntry::SafeConvertToType(raw_entry_type,
3238 &entry_type)) {
3239 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDeleteTransferCacheEntryINTERNAL",
3240 "Attempt to use OOP transfer cache with an invalid "
3241 "cache entry type.");
3242 return;
3243 }
3244
3245 if (!transfer_cache()->DeleteEntry(ServiceTransferCache::EntryKey(
3246 raster_decoder_id_, entry_type, entry_id))) {
3247 LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDeleteTransferCacheEntryINTERNAL",
3248 "Attempt to delete an invalid ID");
3249 }
3250 }
3251
void RasterDecoderImpl::RestoreStateForAttrib(GLuint attrib_index,
                                              bool restore_array_binding) {
  // The raster decoder does not track per-attrib GL state, so both
  // parameters are intentionally ignored; instead, conservatively mark all
  // GrContext-cached GL state as dirty.
  shared_context_state_->PessimisticallyResetGrContext();
}
3256
3257 // Include the auto-generated part of this file. We split this because it means
3258 // we can easily edit the non-auto generated parts right here in this file
3259 // instead of having to edit some template or the code generator.
3260 #include "base/macros.h"
3261 #include "build/chromeos_buildflags.h"
3262 #include "gpu/command_buffer/service/raster_decoder_autogen.h"
3263
3264 } // namespace raster
3265 } // namespace gpu
3266