1 /*
2 * Copyright 2019 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/dawn/GrDawnGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "src/core/SkConvertPixels.h"
15 #include "src/gpu/GrDataUtils.h"
16 #include "src/gpu/GrDirectContextPriv.h"
17 #include "src/gpu/GrGeometryProcessor.h"
18 #include "src/gpu/GrGpuResourceCacheAccess.h"
19 #include "src/gpu/GrPipeline.h"
20 #include "src/gpu/GrRenderTarget.h"
21 #include "src/gpu/GrSemaphore.h"
22 #include "src/gpu/GrStencilSettings.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/dawn/GrDawnAttachment.h"
25 #include "src/gpu/dawn/GrDawnBuffer.h"
26 #include "src/gpu/dawn/GrDawnCaps.h"
27 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
28 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
29 #include "src/gpu/dawn/GrDawnRenderTarget.h"
30 #include "src/gpu/dawn/GrDawnTexture.h"
31 #include "src/gpu/dawn/GrDawnUtil.h"
32
33 #include "src/core/SkAutoMalloc.h"
34 #include "src/core/SkMipmap.h"
35 #include "src/sksl/SkSLCompiler.h"
36
37 #if !defined(SK_BUILD_FOR_WIN)
38 #include <unistd.h>
39 #endif // !defined(SK_BUILD_FOR_WIN)
40
41 static const int kMaxRenderPipelineEntries = 1024;
42
43 namespace {
44
// Wraps a wgpu::Fence together with the device that must be ticked for its
// completion callback to run. Backs the opaque GrFence handles returned by
// GrDawnGpu::insertFence(); destroyed only via GrDawnGpu::deleteFence().
class Fence {
public:
    Fence(const wgpu::Device& device, const wgpu::Fence& fence)
      : fDevice(device), fFence(fence), fCalled(false) {
        // Register for completion of fence value 0. Passing 'this' is safe
        // because the Fence object outlives the wait: it is only freed by
        // deleteFence() after the caller is done polling.
        fFence.OnCompletion(0, callback, this);
    }

    // Dawn completion callback: flips the flag regardless of 'status'.
    static void callback(WGPUFenceCompletionStatus status, void* userData) {
        Fence* fence = static_cast<Fence*>(userData);
        fence->fCalled = true;
    }

    // Non-blocking poll: ticks the device so pending callbacks can fire,
    // then reports whether this fence's callback has run.
    bool check() {
        fDevice.Tick();
        return fCalled;
    }

    wgpu::Fence fence() { return fFence; }

private:
    wgpu::Device fDevice;
    wgpu::Fence fFence;
    bool fCalled;
};
69
70 }
71
to_dawn_filter_mode(GrSamplerState::Filter filter)72 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
73 switch (filter) {
74 case GrSamplerState::Filter::kNearest:
75 return wgpu::FilterMode::Nearest;
76 case GrSamplerState::Filter::kLinear:
77 return wgpu::FilterMode::Linear;
78 default:
79 SkASSERT(!"unsupported filter mode");
80 return wgpu::FilterMode::Nearest;
81 }
82 }
83
to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode)84 static wgpu::FilterMode to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode) {
85 switch (mode) {
86 case GrSamplerState::MipmapMode::kNone:
87 // Fall-through (Dawn does not have an equivalent for "None")
88 case GrSamplerState::MipmapMode::kNearest:
89 return wgpu::FilterMode::Nearest;
90 case GrSamplerState::MipmapMode::kLinear:
91 return wgpu::FilterMode::Linear;
92 default:
93 SkASSERT(!"unsupported filter mode");
94 return wgpu::FilterMode::Nearest;
95 }
96 }
97
to_dawn_address_mode(GrSamplerState::WrapMode wrapMode)98 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
99 switch (wrapMode) {
100 case GrSamplerState::WrapMode::kClamp:
101 return wgpu::AddressMode::ClampToEdge;
102 case GrSamplerState::WrapMode::kRepeat:
103 return wgpu::AddressMode::Repeat;
104 case GrSamplerState::WrapMode::kMirrorRepeat:
105 return wgpu::AddressMode::MirrorRepeat;
106 case GrSamplerState::WrapMode::kClampToBorder:
107 SkASSERT(!"unsupported address mode");
108 }
109 SkASSERT(!"unsupported address mode");
110 return wgpu::AddressMode::ClampToEdge;
111 }
112
Make(const wgpu::Device & device,const GrContextOptions & options,GrDirectContext * direct)113 sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
114 const GrContextOptions& options, GrDirectContext* direct) {
115 if (!device) {
116 return nullptr;
117 }
118
119 return sk_sp<GrGpu>(new GrDawnGpu(direct, options, device));
120 }
121
122 ////////////////////////////////////////////////////////////////////////////////
123
// Constructs a GrDawnGpu bound to 'device'. Sets up the default queue, the
// uniform ring buffer, the staging-buffer manager, the (size-bounded) render
// pipeline cache, and the finish-callback list, then creates the caps object
// and the SkSL compiler driven by those caps.
GrDawnGpu::GrDawnGpu(GrDirectContext* direct, const GrContextOptions& options,
                     const wgpu::Device& device)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(device.GetDefaultQueue())
        , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
        , fStagingBufferManager(this)
        , fRenderPipelineCache(kMaxRenderPipelineEntries)  // bounded pipeline cache
        , fFinishCallbacks(this) {
    fCaps.reset(new GrDawnCaps(options));
    fCompiler.reset(new SkSL::Compiler(fCaps->shaderCaps()));
}
136
// Blocks until all in-flight staging buffers have completed their async
// map-back, so no Dawn callbacks can fire into a destroyed object.
GrDawnGpu::~GrDawnGpu() {
    this->waitOnAllBusyStagingBuffers();
}
140
// Tears down GPU state when the context is destroyed or abandoned. A clean
// shutdown first drains busy staging buffers; an abandoned context just drops
// everything (the device may already be unusable).
void GrDawnGpu::disconnect(DisconnectType type) {
    if (DisconnectType::kCleanup == type) {
        this->waitOnAllBusyStagingBuffers();
    }
    fStagingBufferManager.reset();
    fQueue = nullptr;
    fDevice = nullptr;
    INHERITED::disconnect(type);
}
150
151 ///////////////////////////////////////////////////////////////////////////////
152
// Returns the (single, reused) render pass object used to record ops for
// 'rt'. Note: the stencil attachment argument, 'bounds', 'sampledProxies',
// and 'renderPassXferBarriers' are currently unused by this backend.
GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        GrAttachment*,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
    return fOpsRenderPass.get();
}
165
166 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)167 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
168 GrAccessPattern accessPattern, const void* data) {
169 sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
170 if (data && b) {
171 b->updateData(data, size);
172 }
173 return b;
174 }
175
176 ////////////////////////////////////////////////////////////////////////////////
// Uploads 'mipLevelCount' levels of CPU pixel data into the
// (left, top, width, height) region of 'surface'. Fails if the surface is
// not texture-backed. When fewer levels than the texture's full chain are
// provided, the remaining mips are flagged dirty so they get regenerated.
bool GrDawnGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                              GrColorType surfaceColorType, GrColorType srcColorType,
                              const GrMipLevel texels[], int mipLevelCount,
                              bool prepForTexSampling) {
    GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    this->uploadTextureData(srcColorType, texels, mipLevelCount,
                            SkIRect::MakeXYWH(left, top, width, height), texture->texture());
    // maxMipmapLevel() excludes the base level, hence the +1.
    if (mipLevelCount < texture->maxMipmapLevel() + 1) {
        texture->markMipmapsDirty();
    }
    return true;
}
192
// Buffer-to-texture transfers are not yet implemented for the Dawn backend.
bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                   GrColorType textureColorType, GrColorType bufferColorType,
                                   GrGpuBuffer* transferBuffer, size_t bufferOffset,
                                   size_t rowBytes) {
    SkASSERT(!"unimplemented");
    return false;
}
200
// Surface-to-buffer transfers are not yet implemented for the Dawn backend.
bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                     GrColorType surfaceColorType, GrColorType bufferColorType,
                                     GrGpuBuffer* transferBuffer, size_t offset) {
    SkASSERT(!"unimplemented");
    return false;
}
207
208 ////////////////////////////////////////////////////////////////////////////////
// Creates a new Dawn-backed texture. Up-front clearing of levels is not
// supported by this backend, so any non-zero 'levelClearMask' fails creation.
sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
                                            const GrBackendFormat& backendFormat,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            SkBudgeted budgeted,
                                            GrProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask) {
    if (levelClearMask) {
        return nullptr;
    }

    wgpu::TextureFormat format;
    if (!backendFormat.asDawnFormat(&format)) {
        return nullptr;
    }

    // No mip data is uploaded here, so a multi-level texture starts dirty.
    GrMipmapStatus mipmapStatus =
        mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
                               budgeted, mipLevelCount, mipmapStatus);
}
232
// Compressed textures are not yet implemented for the Dawn backend.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipmapped, GrProtected,
                                                      const void* data, size_t dataSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
239
// Wraps an externally created Dawn texture in a non-renderable GrTexture
// (sample count fixed at 1). 'ownership' is currently unused here.
sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                 GrWrapOwnership ownership,
                                                 GrWrapCacheable cacheable,
                                                 GrIOType ioType) {
    GrDawnTextureInfo info;
    if (!backendTex.getDawnTextureInfo(&info)) {
        return nullptr;
    }

    SkISize dimensions = { backendTex.width(), backendTex.height() };
    return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, cacheable, ioType,
                                      info);
}
253
// Wrapping compressed backend textures is not supported by this backend.
sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable cacheable) {
    return nullptr;
}
259
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrWrapOwnership,GrWrapCacheable cacheable)260 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
261 int sampleCnt,
262 GrWrapOwnership,
263 GrWrapCacheable cacheable) {
264 GrDawnTextureInfo info;
265 if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
266 return nullptr;
267 }
268
269 SkISize dimensions = { tex.width(), tex.height() };
270 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
271 if (sampleCnt < 1) {
272 return nullptr;
273 }
274
275 sk_sp<GrTexture> result = GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes,
276 sampleCnt, cacheable, kRW_GrIOType, info);
277 result->markMipmapsDirty();
278 return result;
279 }
280
// Wraps an externally created Dawn texture view as a render target.
// MSAA is not supported here, so the sample count is forced to 1.
sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
    GrDawnRenderTargetInfo info;
    if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
        return nullptr;
    }

    SkISize dimensions = { rt.width(), rt.height() };
    int sampleCnt = 1;
    return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
}
291
// Creates a stencil attachment suitable for 'rt' and bumps the stats counter.
sk_sp<GrAttachment> GrDawnGpu::makeStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                    SkISize dimensions,
                                                                    int numStencilSamples) {
    fStats.incStencilAttachmentCreates();
    return GrDawnAttachment::MakeStencil(this, dimensions, numStencilSamples);
}
298
// Creates a client-owned backend texture directly through Dawn. No pixel
// data is uploaded here; see onUpdateBackendTexture() for that step.
GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
                                                   const GrBackendFormat& backendFormat,
                                                   GrRenderable renderable,
                                                   GrMipmapped mipMapped,
                                                   GrProtected isProtected) {
    wgpu::TextureFormat format;
    if (!backendFormat.asDawnFormat(&format)) {
        return GrBackendTexture();
    }

    // Always usable for sampling and as a copy source/destination so later
    // uploads and readbacks work.
    wgpu::TextureDescriptor desc;
    desc.usage =
        wgpu::TextureUsage::Sampled |
        wgpu::TextureUsage::CopySrc |
        wgpu::TextureUsage::CopyDst;

    if (GrRenderable::kYes == renderable) {
        desc.usage |= wgpu::TextureUsage::OutputAttachment;
    }

    // SkMipmap::ComputeLevelCount excludes the base level, hence the +1.
    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    desc.size.width = dimensions.width();
    desc.size.height = dimensions.height();
    desc.size.depth = 1;
    desc.format = format;
    desc.mipLevelCount = numMipLevels;

    wgpu::Texture tex = this->device().CreateTexture(&desc);

    GrDawnTextureInfo info;
    info.fTexture = tex;
    info.fFormat = desc.format;
    info.fLevelCount = desc.mipLevelCount;
    return GrBackendTexture(dimensions.width(), dimensions.height(), info);
}
338
// Copies CPU pixel data for each provided mip level into 'texture' via
// staging buffers recorded on the shared copy encoder. 'rect' is the level-0
// destination region; each successive level targets the half-sized region
// (dimensions clamped to at least 1).
void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[],
                                  int mipLevelCount, const SkIRect& rect,
                                  wgpu::Texture texture) {
    uint32_t x = rect.x();
    uint32_t y = rect.y();
    uint32_t width = rect.width();
    uint32_t height = rect.height();

    for (int i = 0; i < mipLevelCount; i++) {
        const void* src = texels[i].fPixels;
        size_t srcRowBytes = texels[i].fRowBytes;
        SkColorType colorType = GrColorTypeToSkColorType(srcColorType);
        size_t trimRowBytes = width * SkColorTypeBytesPerPixel(colorType);
        // Dawn buffer-to-texture copies need a rounded-up row pitch; pad each
        // row out to the alignment GrDawnRoundRowBytes enforces.
        size_t dstRowBytes = GrDawnRoundRowBytes(trimRowBytes);
        size_t size = dstRowBytes * height;
        GrStagingBufferManager::Slice slice =
                this->stagingBufferManager()->allocateStagingBufferSlice(size);
        // Tight-pack each source row into the padded staging layout.
        SkRectMemcpy(slice.fOffsetMapPtr, dstRowBytes, src, srcRowBytes, trimRowBytes, height);

        wgpu::BufferCopyView srcBuffer = {};
        srcBuffer.buffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
        srcBuffer.layout.offset = slice.fOffset;
        srcBuffer.layout.bytesPerRow = dstRowBytes;
        srcBuffer.layout.rowsPerImage = height;

        wgpu::TextureCopyView dstTexture;
        dstTexture.texture = texture;
        dstTexture.mipLevel = i;
        dstTexture.origin = {x, y, 0};

        wgpu::Extent3D copySize = {width, height, 1};
        this->getCopyEncoder().CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Advance to the next mip level's origin and dimensions.
        x /= 2;
        y /= 2;
        width = std::max(1u, width / 2);
        height = std::max(1u, height / 2);
    }
}
377
onUpdateBackendTexture(const GrBackendTexture & backendTexture,sk_sp<GrRefCntedCallback> finishedCallback,const BackendTextureData * data)378 bool GrDawnGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
379 sk_sp<GrRefCntedCallback> finishedCallback,
380 const BackendTextureData* data) {
381 GrDawnTextureInfo info;
382 SkAssertResult(backendTexture.getDawnTextureInfo(&info));
383
384 size_t bpp = GrDawnBytesPerBlock(info.fFormat);
385 size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
386 const void* pixels;
387 SkAutoMalloc defaultStorage(baseLayerSize);
388 if (data && data->type() == BackendTextureData::Type::kPixmaps) {
389 SkTDArray<GrMipLevel> texels;
390 GrColorType colorType = SkColorTypeToGrColorType(data->pixmap(0).colorType());
391 int numMipLevels = info.fLevelCount;
392 texels.append(numMipLevels);
393 for (int i = 0; i < numMipLevels; ++i) {
394 texels[i] = {data->pixmap(i).addr(), data->pixmap(i).rowBytes()};
395 }
396 SkIRect dstRect = SkIRect::MakeSize(backendTexture.dimensions());
397 this->uploadTextureData(colorType, texels.begin(), texels.count(), dstRect, info.fTexture);
398 return true;
399 }
400 pixels = defaultStorage.get();
401 GrColorType colorType;
402 if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
403 return false;
404 }
405 SkISize size{backendTexture.width(), backendTexture.height()};
406 GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, size);
407 GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), data->color());
408 wgpu::Device device = this->device();
409 wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
410 int w = backendTexture.width(), h = backendTexture.height();
411 for (uint32_t i = 0; i < info.fLevelCount; i++) {
412 size_t origRowBytes = bpp * w;
413 size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
414 size_t size = rowBytes * h;
415 GrStagingBufferManager::Slice stagingBuffer =
416 this->stagingBufferManager()->allocateStagingBufferSlice(size);
417 if (rowBytes == origRowBytes) {
418 memcpy(stagingBuffer.fOffsetMapPtr, pixels, size);
419 } else {
420 const char* src = static_cast<const char*>(pixels);
421 char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
422 for (int row = 0; row < h; row++) {
423 memcpy(dst, src, origRowBytes);
424 dst += rowBytes;
425 src += origRowBytes;
426 }
427 }
428 wgpu::BufferCopyView srcBuffer = {};
429 srcBuffer.buffer = static_cast<GrDawnBuffer*>(stagingBuffer.fBuffer)->get();
430 srcBuffer.layout.offset = stagingBuffer.fOffset;
431 srcBuffer.layout.bytesPerRow = rowBytes;
432 srcBuffer.layout.rowsPerImage = h;
433 wgpu::TextureCopyView dstTexture;
434 dstTexture.texture = info.fTexture;
435 dstTexture.mipLevel = i;
436 dstTexture.origin = {0, 0, 0};
437 wgpu::Extent3D copySize = {(uint32_t)w, (uint32_t)h, 1};
438 copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, ©Size);
439 w = std::max(1, w / 2);
440 h = std::max(1, h / 2);
441 }
442 return true;
443 }
444
// Compressed backend textures are not supported by this backend.
GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) {
    return {};
}
449
// Updating compressed backend textures is not supported by this backend.
bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                 sk_sp<GrRefCntedCallback> finishedCallback,
                                                 const BackendTextureData*) {
    return false;
}
455
// Releases this side's reference to the backing wgpu::Texture.
// NOTE(review): only the locally copied info struct is reset; presumably
// dropping that ref is what allows Dawn to destroy the texture once all
// other refs are gone — confirm GrDawnTextureInfo's ownership semantics.
void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    GrDawnTextureInfo info;
    if (tex.getDawnTextureInfo(&info)) {
        info.fTexture = nullptr;
    }
}
462
// Ahead-of-time program compilation is not supported by this backend.
bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
    return false;
}
466
467 #if GR_TEST_UTILS
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const468 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
469 GrDawnTextureInfo info;
470 if (!tex.getDawnTextureInfo(&info)) {
471 return false;
472 }
473
474 return info.fTexture.Get();
475 }
476
// Test-only creation of a backend render target. Rejects oversized
// dimensions, any MSAA request, and protected content, none of which this
// backend supports.
GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                      GrColorType colorType,
                                                                      int sampleCnt,
                                                                      GrProtected isProtected) {
    if (dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() > this->caps()->maxTextureSize()) {
        return {};
    }

    // We don't support MSAA in this backend yet.
    if (sampleCnt != 1) {
        return {};
    }

    if (isProtected == GrProtected::kYes) {
        return {};
    }

    wgpu::TextureFormat format;
    if (!GrColorTypeToDawnFormat(colorType, &format)) {
        return {};
    }

    // Needs to be renderable and readable-back for tests.
    wgpu::TextureDescriptor desc;
    desc.usage =
        wgpu::TextureUsage::CopySrc |
        wgpu::TextureUsage::OutputAttachment;

    desc.size.width = dimensions.width();
    desc.size.height = dimensions.height();
    desc.size.depth = 1;
    desc.format = format;

    wgpu::Texture tex = this->device().CreateTexture(&desc);

    GrDawnRenderTargetInfo info;
    info.fTextureView = tex.CreateView();
    info.fFormat = desc.format;
    // desc.mipLevelCount was never set, so this records the descriptor's
    // default level count.
    info.fLevelCount = desc.mipLevelCount;

    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 1, 0, info);
}
519
// Test-only teardown: drops this side's reference to the render target's
// texture view (mirrors deleteBackendTexture()).
void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    GrDawnRenderTargetInfo info;
    if (rt.getDawnRenderTargetInfo(&info)) {
        info.fTextureView = nullptr;
    }
}
526
// Test-only helper: submits all pending work and blocks until the GPU is
// finished (syncCpu = true).
void GrDawnGpu::testingOnly_flushGpuAndSync() {
    this->submitToGpu(true);
}
530
531 #endif
532
// Registers a client callback to be invoked once previously submitted GPU
// work completes (see checkFinishProcs()/onSubmitToGpu()).
void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                                GrGpuFinishedContext finishedContext) {
    fFinishCallbacks.add(finishedProc, finishedContext);
}
537
// Retires staging buffers whose async map-back has completed.
void GrDawnGpu::checkForCompletedStagingBuffers() {
    // We expect all the buffer maps to trigger in order of submission so we bail after the first
    // non finished map since we always push new busy buffers to the back of our list.
    while (!fBusyStagingBuffers.empty() && fBusyStagingBuffers.front()->isMapped()) {
        fBusyStagingBuffers.pop_front();
    }
}
545
// Blocks until every busy staging buffer has completed its map-back, ticking
// the device so Dawn can deliver the mapping callbacks.
void GrDawnGpu::waitOnAllBusyStagingBuffers() {
    while (!fBusyStagingBuffers.empty()) {
        fDevice.Tick();
        this->checkForCompletedStagingBuffers();
    }
}
552
// Takes ownership of a buffer used by in-flight work; it is kept alive in the
// submitted list until the corresponding submission completes.
void GrDawnGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    fSubmittedStagingBuffers.push_back(std::move(buffer));
}
556
557
callback(WGPUFenceCompletionStatus status,void * userData)558 static void callback(WGPUFenceCompletionStatus status, void* userData) {
559 *static_cast<bool*>(userData) = true;
560 }
561
// Flushes the pending copy encoder, submits all recorded command buffers to
// the queue, and optionally blocks (syncCpu) until the GPU signals a fence.
bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
    this->flushCopyEncoder();
    if (!fCommandBuffers.empty()) {
        fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
        fCommandBuffers.clear();
    }

    // Staging buffers used by this submission become busy until their async
    // map-back completes.
    this->moveStagingBuffersToBusyAndMapAsync();
    if (syncCpu) {
        // Spin the device until the fence completion callback fires.
        wgpu::FenceDescriptor desc;
        wgpu::Fence fence = fQueue.CreateFence(&desc);
        bool called = false;
        fence.OnCompletion(0, callback, &called);
        while (!called) {
            fDevice.Tick();
        }
        // Fully synced, so every registered finish callback can fire now.
        fFinishCallbacks.callAll(true);
    }

    this->checkForCompletedStagingBuffers();

    return true;
}
585
get_dawn_texture_from_surface(GrSurface * src)586 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
587 if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
588 return t->texture();
589 } else {
590 return nullptr;
591 }
592 }
593
// Copies 'srcRect' from 'src' to 'dst' at 'dstPoint' via a GPU
// texture-to-texture copy. Both surfaces must be texture-backed.
bool GrDawnGpu::onCopySurface(GrSurface* dst,
                              GrSurface* src,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint) {
    wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
    wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
    if (!srcTexture || !dstTexture) {
        return false;
    }

    uint32_t width = srcRect.width(), height = srcRect.height();

    wgpu::TextureCopyView srcTextureView, dstTextureView;
    srcTextureView.texture = srcTexture;
    srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
    dstTextureView.texture = dstTexture;
    dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};

    wgpu::Extent3D copySize = {width, height, 1};
    this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, &copySize);
    return true;
}
616
callback(WGPUBufferMapAsyncStatus status,void * userdata)617 static void callback(WGPUBufferMapAsyncStatus status, void* userdata) {
618 *static_cast<bool*>(userdata) = true;
619 }
620
// Reads back a (left, top, width, height) region of 'surface' into 'buffer'
// with the caller's 'rowBytes' pitch. Synchronous: copies into a MapRead
// buffer, submits with a full CPU sync, then spins until the map completes.
bool GrDawnGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                             GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                             size_t rowBytes) {
    wgpu::Texture tex = get_dawn_texture_from_surface(surface);

    if (!tex || 0 == rowBytes) {
        return false;
    }
    // The GPU-side copy needs a rounded-up row pitch; remember the caller's
    // original pitch for the unpack step below.
    size_t origRowBytes = rowBytes;
    int origSizeInBytes = origRowBytes * height;
    rowBytes = GrDawnRoundRowBytes(rowBytes);
    int sizeInBytes = rowBytes * height;

    wgpu::BufferDescriptor desc;
    desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
    desc.size = sizeInBytes;

    wgpu::Buffer buf = device().CreateBuffer(&desc);

    wgpu::TextureCopyView srcTexture;
    srcTexture.texture = tex;
    srcTexture.origin = {(uint32_t) left, (uint32_t) top, 0};

    wgpu::BufferCopyView dstBuffer = {};
    dstBuffer.buffer = buf;
    dstBuffer.layout.offset = 0;
    dstBuffer.layout.bytesPerRow = rowBytes;
    dstBuffer.layout.rowsPerImage = height;

    wgpu::Extent3D copySize = {(uint32_t) width, (uint32_t) height, 1};
    this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
    this->submitToGpu(true);

    // Map the readback buffer; size 0 presumably means "whole buffer" in this
    // Dawn revision — confirm against the Dawn headers in use.
    bool mapped = false;
    buf.MapAsync(wgpu::MapMode::Read, 0, 0, callback, &mapped);
    while (!mapped) {
        device().Tick();
    }
    const void* readPixelsPtr = buf.GetConstMappedRange();

    if (rowBytes == origRowBytes) {
        memcpy(buffer, readPixelsPtr, origSizeInBytes);
    } else {
        // Strip the per-row padding while copying into the caller's buffer.
        const char* src = static_cast<const char*>(readPixelsPtr);
        char* dst = static_cast<char*>(buffer);
        for (int row = 0; row < height; row++) {
            memcpy(dst, src, origRowBytes);
            dst += origRowBytes;
            src += rowBytes;
        }
    }
    buf.Unmap();
    return true;
}
675
// Regenerates all mip levels of 'tex' on the GPU: renders each successive
// level into a temporary half-sized mip chain using a downsampling draw,
// then copies each result back into the source texture's mip chain.
bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    this->flushCopyEncoder();
    GrDawnTexture* src = static_cast<GrDawnTexture*>(tex);
    int srcWidth = tex->width();
    int srcHeight = tex->height();

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;

    // Create a temporary texture for mipmap generation, then copy to source.
    // We have to do this even for renderable textures, since GrDawnRenderTarget currently only
    // contains a view, not a texture.
    wgpu::TextureDescriptor texDesc;
    texDesc.usage = wgpu::TextureUsage::Sampled |
                    wgpu::TextureUsage::CopySrc |
                    wgpu::TextureUsage::OutputAttachment;
    // The temp texture's level 0 corresponds to the source's level 1, so it
    // is half-sized and holds one fewer level.
    texDesc.size.width = (tex->width() + 1) / 2;
    texDesc.size.height = (tex->height() + 1) / 2;
    texDesc.size.depth = 1;
    texDesc.mipLevelCount = levelCount - 1;
    texDesc.format = src->format();
    wgpu::Texture dstTexture = fDevice.CreateTexture(&texDesc);

    // Fullscreen-quad vertex shader: positions/UVs for a 4-vertex tri-strip.
    const char* vs =
        "layout(location = 0) out float2 texCoord;\n"
        "float2 positions[4] = float2[4](float2(-1.0, 1.0),\n"
        "float2(1.0, 1.0),\n"
        "float2(-1.0, -1.0),\n"
        "float2(1.0, -1.0));\n"
        "float2 texCoords[4] = float2[4](float2(0.0, 0.0),\n"
        "float2(1.0, 0.0),\n"
        "float2(0.0, 1.0),\n"
        "float2(1.0, 1.0));\n"
        "void main() {\n"
        "    sk_Position = float4(positions[sk_VertexID], 0.0, 1.0);\n"
        "    texCoord = texCoords[sk_VertexID];\n"
        "}\n";
    SkSL::String vsSPIRV =
        this->SkSLToSPIRV(vs, SkSL::Program::kVertex_Kind, false, 0, nullptr);

    // Fragment shader: bilinear sample of the previous level.
    const char* fs =
        "layout(set = 0, binding = 0) uniform sampler samp;\n"
        "layout(set = 0, binding = 1) uniform texture2D tex;\n"
        "layout(location = 0) in float2 texCoord;\n"
        "void main() {\n"
        "    sk_FragColor = sample(makeSampler2D(tex, samp), texCoord);\n"
        "}\n";
    SkSL::String fsSPIRV =
        this->SkSLToSPIRV(fs, SkSL::Program::kFragment_Kind, false, 0, nullptr);

    wgpu::ProgrammableStageDescriptor vsDesc;
    vsDesc.module = this->createShaderModule(vsSPIRV);
    vsDesc.entryPoint = "main";

    wgpu::ProgrammableStageDescriptor fsDesc;
    fsDesc.module = this->createShaderModule(fsSPIRV);
    fsDesc.entryPoint = "main";

    wgpu::VertexStateDescriptor vertexStateDesc;
    vertexStateDesc.indexFormat = wgpu::IndexFormat::Uint32;

    wgpu::ColorStateDescriptor csDesc;
    csDesc.format = static_cast<GrDawnTexture*>(tex)->format();

    wgpu::RenderPipelineDescriptor renderPipelineDesc;
    renderPipelineDesc.vertexStage = vsDesc;
    renderPipelineDesc.fragmentStage = &fsDesc;
    renderPipelineDesc.vertexState = &vertexStateDesc;
    renderPipelineDesc.primitiveTopology = wgpu::PrimitiveTopology::TriangleStrip;
    renderPipelineDesc.colorStateCount = 1;
    renderPipelineDesc.colorStates = &csDesc;
    wgpu::RenderPipeline pipeline = fDevice.CreateRenderPipeline(&renderPipelineDesc);

    wgpu::BindGroupLayout bgl = pipeline.GetBindGroupLayout(0);
    // First source view is the original texture's base level only.
    wgpu::TextureViewDescriptor srcViewDesc;
    srcViewDesc.mipLevelCount = 1;
    wgpu::TextureView srcView = src->texture().CreateView(&srcViewDesc);
    wgpu::SamplerDescriptor samplerDesc;
    samplerDesc.minFilter = wgpu::FilterMode::Linear;
    wgpu::Sampler sampler = fDevice.CreateSampler(&samplerDesc);
    wgpu::CommandEncoder commandEncoder = fDevice.CreateCommandEncoder();
    for (uint32_t mipLevel = 0; mipLevel < texDesc.mipLevelCount; mipLevel++) {
        int dstWidth = std::max(1, srcWidth / 2);
        int dstHeight = std::max(1, srcHeight / 2);
        // Render target view: one level of the temp texture.
        wgpu::TextureViewDescriptor dstViewDesc;
        dstViewDesc.format = static_cast<GrDawnTexture*>(tex)->format();
        dstViewDesc.dimension = wgpu::TextureViewDimension::e2D;
        dstViewDesc.baseMipLevel = mipLevel;
        dstViewDesc.mipLevelCount = 1;
        wgpu::TextureView dstView = dstTexture.CreateView(&dstViewDesc);
        wgpu::BindGroupEntry bge[2];
        bge[0].binding = 0;
        bge[0].sampler = sampler;
        bge[1].binding = 1;
        bge[1].textureView = srcView;
        wgpu::BindGroupDescriptor bgDesc;
        bgDesc.layout = bgl;
        bgDesc.entryCount = 2;
        bgDesc.entries = bge;
        wgpu::BindGroup bindGroup = fDevice.CreateBindGroup(&bgDesc);
        wgpu::RenderPassColorAttachmentDescriptor colorAttachment;
        colorAttachment.attachment = dstView;
        colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
        colorAttachment.loadOp = wgpu::LoadOp::Load;
        colorAttachment.storeOp = wgpu::StoreOp::Store;
        wgpu::RenderPassColorAttachmentDescriptor* colorAttachments = { &colorAttachment };
        wgpu::RenderPassDescriptor renderPassDesc;
        renderPassDesc.colorAttachmentCount = 1;
        renderPassDesc.colorAttachments = colorAttachments;
        wgpu::RenderPassEncoder rpe = commandEncoder.BeginRenderPass(&renderPassDesc);
        rpe.SetPipeline(pipeline);
        rpe.SetBindGroup(0, bindGroup);
        // Draw the 4-vertex strip to downsample src level into dst level.
        rpe.Draw(4, 1, 0, 0);
        rpe.EndPass();

        // Copy the freshly rendered temp level back into the source texture's
        // next mip level (temp level N == source level N+1).
        wgpu::Extent3D copySize = {(uint32_t)dstWidth, (uint32_t)dstHeight, 1};
        wgpu::TextureCopyView srcCopyView;
        srcCopyView.texture = dstTexture;
        srcCopyView.mipLevel = mipLevel;
        wgpu::TextureCopyView dstCopyView;
        dstCopyView.mipLevel = mipLevel + 1;
        dstCopyView.texture = src->texture();
        commandEncoder.CopyTextureToTexture(&srcCopyView, &dstCopyView, &copySize);

        // The level just rendered becomes the source for the next iteration.
        srcHeight = dstHeight;
        srcWidth = dstWidth;
        srcView = dstView;
    }
    fCommandBuffers.push_back(commandEncoder.Finish());
    return true;
}
807
submit(GrOpsRenderPass * renderPass)808 void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
809 this->flushCopyEncoder();
810 static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
811 }
812
insertFence()813 GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
814 wgpu::FenceDescriptor desc;
815 wgpu::Fence fence = fQueue.CreateFence(&desc);
816 return reinterpret_cast<GrFence>(new Fence(fDevice, fence));
817 }
818
waitFence(GrFence fence)819 bool GrDawnGpu::waitFence(GrFence fence) {
820 return reinterpret_cast<Fence*>(fence)->check();
821 }
822
deleteFence(GrFence fence) const823 void GrDawnGpu::deleteFence(GrFence fence) const {
824 delete reinterpret_cast<Fence*>(fence);
825 }
826
makeSemaphore(bool isOwned)827 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
828 SkASSERT(!"unimplemented");
829 return nullptr;
830 }
831
wrapBackendSemaphore(const GrBackendSemaphore & semaphore,GrResourceProvider::SemaphoreWrapType wrapType,GrWrapOwnership ownership)832 std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(
833 const GrBackendSemaphore& semaphore,
834 GrResourceProvider::SemaphoreWrapType wrapType,
835 GrWrapOwnership ownership) {
836 SkASSERT(!"unimplemented");
837 return nullptr;
838 }
839
insertSemaphore(GrSemaphore * semaphore)840 void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
841 SkASSERT(!"unimplemented");
842 }
843
waitSemaphore(GrSemaphore * semaphore)844 void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
845 SkASSERT(!"unimplemented");
846 }
847
checkFinishProcs()848 void GrDawnGpu::checkFinishProcs() {
849 fFinishCallbacks.check();
850 }
851
prepareTextureForCrossContextUsage(GrTexture * texture)852 std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
853 SkASSERT(!"unimplemented");
854 return nullptr;
855 }
856
getOrCreateRenderPipeline(GrRenderTarget * rt,const GrProgramInfo & programInfo)857 sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
858 GrRenderTarget* rt,
859 const GrProgramInfo& programInfo) {
860 GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
861 if (!desc.isValid()) {
862 return nullptr;
863 }
864
865 if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
866 return *program;
867 }
868
869 wgpu::TextureFormat colorFormat;
870 SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));
871
872 wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
873 bool hasDepthStencil = rt->getStencilAttachment() != nullptr;
874
875 sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
876 this, rt, programInfo, colorFormat,
877 hasDepthStencil, stencilFormat, &desc);
878 fRenderPipelineCache.insert(desc, program);
879 return program;
880 }
881
getOrCreateSampler(GrSamplerState samplerState)882 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
883 auto i = fSamplers.find(samplerState);
884 if (i != fSamplers.end()) {
885 return i->second;
886 }
887 wgpu::SamplerDescriptor desc;
888 desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
889 desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
890 desc.addressModeW = wgpu::AddressMode::ClampToEdge;
891 desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
892 desc.mipmapFilter = to_dawn_mipmap_mode(samplerState.mipmapMode());
893 wgpu::Sampler sampler = device().CreateSampler(&desc);
894 fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
895 return sampler;
896 }
897
allocateUniformRingBufferSlice(int size)898 GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
899 return fUniformRingBuffer.allocate(size);
900 }
901
appendCommandBuffer(wgpu::CommandBuffer commandBuffer)902 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
903 if (commandBuffer) {
904 fCommandBuffers.push_back(commandBuffer);
905 }
906 }
907
getCopyEncoder()908 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
909 if (!fCopyEncoder) {
910 fCopyEncoder = fDevice.CreateCommandEncoder();
911 }
912 return fCopyEncoder;
913 }
914
flushCopyEncoder()915 void GrDawnGpu::flushCopyEncoder() {
916 if (fCopyEncoder) {
917 fCommandBuffers.push_back(fCopyEncoder.Finish());
918 fCopyEncoder = nullptr;
919 }
920 }
921
moveStagingBuffersToBusyAndMapAsync()922 void GrDawnGpu::moveStagingBuffersToBusyAndMapAsync() {
923 for (size_t i = 0; i < fSubmittedStagingBuffers.size(); ++i) {
924 GrDawnBuffer* buffer = static_cast<GrDawnBuffer*>(fSubmittedStagingBuffers[i].get());
925 buffer->mapWriteAsync();
926 fBusyStagingBuffers.push_back(std::move(fSubmittedStagingBuffers[i]));
927 }
928 fSubmittedStagingBuffers.clear();
929 }
930
SkSLToSPIRV(const char * shaderString,SkSL::Program::Kind kind,bool flipY,uint32_t rtHeightOffset,SkSL::Program::Inputs * inputs)931 SkSL::String GrDawnGpu::SkSLToSPIRV(const char* shaderString, SkSL::Program::Kind kind, bool flipY,
932 uint32_t rtHeightOffset, SkSL::Program::Inputs* inputs) {
933 SkSL::Program::Settings settings;
934 settings.fFlipY = flipY;
935 settings.fRTHeightOffset = rtHeightOffset;
936 settings.fRTHeightBinding = 0;
937 settings.fRTHeightSet = 0;
938 std::unique_ptr<SkSL::Program> program = this->shaderCompiler()->convertProgram(
939 kind,
940 shaderString,
941 settings);
942 if (!program) {
943 SkDebugf("SkSL error:\n%s\n", this->shaderCompiler()->errorText().c_str());
944 SkASSERT(false);
945 return "";
946 }
947 if (inputs) {
948 *inputs = program->fInputs;
949 }
950 SkSL::String code;
951 if (!this->shaderCompiler()->toSPIRV(*program, &code)) {
952 return "";
953 }
954 return code;
955 }
956
createShaderModule(const SkSL::String & spirvSource)957 wgpu::ShaderModule GrDawnGpu::createShaderModule(const SkSL::String& spirvSource) {
958 wgpu::ShaderModuleSPIRVDescriptor desc;
959 desc.codeSize = spirvSource.size() / 4;
960 desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
961
962 wgpu::ShaderModuleDescriptor smDesc;
963 smDesc.nextInChain = &desc;
964
965 return fDevice.CreateShaderModule(&smDesc);
966 }
967