/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/SkTo.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkMeshBuffer.h"
#include "src/gpu/vk/GrVkOpsRenderPass.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"
#include "src/sksl/SkSLCompiler.h"

#include "include/gpu/vk/GrVkExtensions.h"
#include "include/gpu/vk/GrVkTypes.h"

#include <utility>

#if !defined(SK_BUILD_FOR_WIN)
#include <unistd.h>
#endif // !defined(SK_BUILD_FOR_WIN)

#if defined(SK_BUILD_FOR_WIN) && defined(SK_DEBUG)
#include "src/core/SkLeanWindows.h"
#endif

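// VK_CALL invokes a Vulkan function through this GrVkGpu's interface table and discards the
// result; VK_CALL_RET additionally captures the VkResult so callers can check for failure
// (e.g. device loss).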
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)

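// A minimal creation sketch (illustrative only, not part of this file): a client that already
// owns the Vulkan objects fills out a GrVkBackendContext and creates a context with it:
//
//     GrVkBackendContext backendContext;
//     backendContext.fInstance       = instance;        // client-created VkInstance
//     backendContext.fPhysicalDevice = physicalDevice;
//     backendContext.fDevice         = device;
//     backendContext.fQueue          = graphicsQueue;
//     backendContext.fGetProc        = getProc;         // client's proc loader
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(backendContext);
//
// GrDirectContext::MakeVulkan() ends up in GrVkGpu::Make() below, which validates the handles
// and builds the interface, caps, and memory allocator.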
sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

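    // Clamp both reported versions to the client's ceiling. For example, an instance reporting
    // 1.2 and a device reporting 1.1 with fMaxAPIVersion set to 1.1 both clamp to 1.1 below.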
    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    sk_sp<const GrVkInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
                                                       backendContext.fPhysicalDevice,
                                                       backendContext.fDevice, physDevVersion,
                                                       backendContext.fVkExtensions, interface,
                                                       caps.get());
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }
    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
                                     instanceVersion, physDevVersion,
                                     std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
                 uint32_t instanceVersion, uint32_t physicalDeviceVersion,
                 sk_sp<GrVkMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);

    fCompiler = new SkSL::Compiler(fVkCaps->shaderCaps());

    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));

    // On Windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
    // on the command buffers even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on Windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
    if (this->vkCaps().mustSleepOnTearDown()) {
#if defined(SK_BUILD_FOR_WIN)
        Sleep(10); // In milliseconds
#else
        sleep(1);  // In seconds
#endif
    }
#endif

#ifdef SK_DEBUG
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
#endif

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    fStagingBufferManager.reset();

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    delete fCompiler;
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}


void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    if (!fCachedOpsRenderPass->set(rt, stencil, origin, bounds, colorInfo, stencilInfo,
                                   sampledProxies, renderPassXferBarriers)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.reset();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.reset();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                           GrAccessPattern accessPattern, const void* data) {
    sk_sp<GrGpuBuffer> buff;
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkMeshBuffer::Make(this, type, size,
                                        kDynamic_GrAccessPattern == accessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        default:
            SK_ABORT("Unknown buffer type.");
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType surfaceColorType, GrColorType srcColorType,
                            const GrMipLevel texels[], int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
    bool success = false;
    bool linearTiling = vkTex->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  VK_ACCESS_HOST_WRITE_BIT,
                                  VK_PIPELINE_STAGE_HOST_BIT,
                                  false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(vkTex, left, top, width, height, srcColorType,
                                            texels[0].fPixels, texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= vkTex->maxMipmapLevel() + 1);
        success = this->uploadTexDataOptimal(vkTex, left, top, width, height, srcColorType, texels,
                                             mipLevelCount);
    }

    if (prepForTexSampling) {
        vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    }

    return success;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                 GrColorType surfaceColorType, GrColorType bufferColorType,
                                 GrGpuBuffer* transferBuffer, size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    if (surfaceColorType != bufferColorType) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
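    // (Effectively bufferOffset must be a multiple of the least common multiple of 4 and bpp,
    // e.g. 4 for a 2-byte-per-pixel format and 8 for an 8-byte-per-pixel format.)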
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
    if (!vkTex) {
        return false;
    }

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));

    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
    if (!vkBuffer) {
        return false;
    }

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
        SkASSERT(bounds.contains(subRect));
    )

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
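    // Note that bufferRowLength is measured in texels rather than bytes, and a bufferImageHeight
    // of 0 means rows are tightly packed.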
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    // Change layout of our target so it can be copied to
    vkTex->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    // Copy the buffer to the image
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer,
                                                    vkTex,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);

    vkTex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                   GrColorType surfaceColorType, GrColorType bufferColorType,
                                   GrGpuBuffer* transferBuffer, size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }
    if (surfaceColorType != bufferColorType) {
        return false;
    }

    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        srcImage = rt;
    } else {
        srcImage = static_cast<GrVkTexture*>(surface->asTexture());
    }

    if (GrVkFormatBytesPerBlock(srcImage->imageFormat()) !=
        GrColorTypeBytesPerPixel(surfaceColorType)) {
        return false;
    }

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = width;
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    vkBuffer, 1, &region);

    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(this,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->numSamples() > 1 && src->msaaImage());

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->msaaImage()->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     VK_ACCESS_TRANSFER_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->msaaImage());
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
                                  GrColorType dataColorType, const void* data, size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
        SkASSERT(bounds.contains(subRect));
    )
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = width * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    const GrVkAlloc& alloc = tex->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = top * layout.rowPitch + left * bpp;
    VkDeviceSize size = height*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
                 height);

    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
    GrVkMemory::UnmapAlloc(this, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,
                              SkTArray<VkBufferImageCopy>* regions,
                              SkTArray<size_t>* individualMipOffsets,
                              GrStagingBufferManager::Slice* slice,
                              SkImage::CompressionType compression,
                              VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped) {
    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }
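    // e.g., a 16x8 base level gives ComputeLevelCount == 4, so five levels in total
    // (16x8, 8x4, 4x2, 2x1, 1x1).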

    regions->reserve_back(numMipLevels);
    individualMipOffsets->reserve_back(numMipLevels);

    size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);

    size_t combinedBufferSize;
    if (compression == SkImage::CompressionType::kNone) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerBlock, dimensions,
                                                              individualMipOffsets,
                                                              numMipLevels);
    } else {
        combinedBufferSize = SkCompressedDataSize(compression, dimensions, individualMipOffsets,
                                                  mipMapped == GrMipmapped::kYes);
    }
    SkASSERT(individualMipOffsets->count() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
    size_t alignment = SkAlign4(bytesPerBlock);
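    // e.g., SkAlign4 rounds a 1-byte block up to an alignment of 4 and leaves an 8-byte block
    // at 8.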
    *slice = stagingBufferManager->allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

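        // Step down to the next mip level, halving each dimension and clamping at 1.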
        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return combinedBufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
                                   GrColorType dataColorType, const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!tex->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (width == 0 || height == 0) {
        return false;
    }

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->vkCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    // For RGB_888x src data we are uploading it first to an RGBA texture and then copying it to the
    // dst RGB texture. Thus we do not upload mip levels for that.
    if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        // First check that we'll be able to do the copy to the R8G8B8 image in the end via a
        // blit or draw.
        if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM, tex->isLinearTiled()) &&
            !this->vkCaps().isFormatRenderable(VK_FORMAT_R8G8B8_UNORM, 1)) {
            return false;
        }
        mipLevelCount = 1;
    }

    SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    texelsShallowCopy.reset(mipLevelCount);
    memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    individualMipOffsets.push_back(0);
    size_t combinedBufferSize = width * bpp * height;
    int currentWidth = width;
    int currentHeight = height;
    if (!texelsShallowCopy[0].fPixels) {
        combinedBufferSize = 0;
    }

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes per pixel of the config is always a
    // power of 2.
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
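    // e.g., bpp == 2 yields a mask of 0x3 (4-byte alignment) and bpp == 8 yields 0x7
    // (8-byte alignment).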
    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);

        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            individualMipOffsets.push_back(0);
        }
    }
    if (0 == combinedBufferSize) {
        // We don't actually have any data to upload so just return success
        return true;
    }

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
    size_t alignment = SkAlign4(bpp);
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = left;
    int uploadTop = top;
    GrVkTexture* uploadTexture = tex;
    // For uploading RGB_888x data to an R8G8B8_UNORM texture we must first upload the data to an
    // R8G8B8A8_UNORM image and then copy it.
    sk_sp<GrVkTexture> copyTexture;
    if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        bool dstHasYcbcr = tex->ycbcrConversionInfo().isValid();
        if (!this->vkCaps().canCopyAsBlit(tex->imageFormat(), 1, false, dstHasYcbcr,
                                          VK_FORMAT_R8G8B8A8_UNORM, 1, false, false)) {
            return false;
        }

        VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        GrVkImage::ImageDesc imageDesc;
        imageDesc.fImageType = VK_IMAGE_TYPE_2D;
        imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
        imageDesc.fWidth = width;
        imageDesc.fHeight = height;
        imageDesc.fLevels = 1;
        imageDesc.fSamples = 1;
        imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        imageDesc.fUsageFlags = usageFlags;
        imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

        copyTexture = GrVkTexture::MakeNewTexture(this, SkBudgeted::kYes, {width, height},
                                                  imageDesc, GrMipmapStatus::kNotAllocated);
        if (!copyTexture) {
            return false;
        }

        uploadTexture = copyTexture.get();
        uploadLeft = 0;
        uploadTop = 0;
    }

    char* buffer = (char*) slice.fOffsetMapPtr;
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    currentWidth = width;
    currentHeight = height;
    int layerHeight = uploadTexture->height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        layerHeight = currentHeight;
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    static_cast<GrVkTransferBuffer*>(slice.fBuffer),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());

    // If we copied the data into a temporary image first, copy that image into our main texture
    // now.
    if (copyTexture) {
        SkASSERT(dataColorType == GrColorType::kRGB_888x);
        SkAssertResult(this->copySurface(tex, copyTexture.get(), SkIRect::MakeWH(width, height),
                                         SkIPoint::Make(left, top)));
    }
    if (1 == mipLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* uploadTexture,
                                      SkImage::CompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipMapped,
                                      const void* data, size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));


    GrStagingBufferManager::Slice slice;
    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_regions(&fStagingBufferManager,
                                                             &regions, &individualMipOffsets,
                                                             &slice, compression, vkFormat,
                                                             dimensions, mipMapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    static_cast<GrVkTransferBuffer*>(slice.fBuffer),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a GrMipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          SkBudgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    SkASSERT(mipLevelCount > 0);
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = dimensions.fWidth;
    imageDesc.fHeight = dimensions.fHeight;
    imageDesc.fLevels = mipLevelCount;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fIsProtected = isProtected;

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, imageDesc, mipmapStatus);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, imageDesc, mipmapStatus);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        SkSTArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
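        // Coalesce runs of consecutive set bits in levelClearMask into single ranges; e.g. a
        // mask of 0b0110 produces one range with baseMipLevel 1 and levelCount 2.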
        for (uint32_t i = 0; i < tex->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
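        // Zero-initialization clears every channel to 0, i.e. transparent black.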
1116         tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1117                             VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1118         this->currentCommandBuffer()->clearColorImage(this, tex.get(), &kZeroClearColor,
1119                                                       ranges.count(), ranges.begin());
1120     }
1121     return std::move(tex);
1122 }
1123 
onCreateCompressedTexture(SkISize dimensions,const GrBackendFormat & format,SkBudgeted budgeted,GrMipmapped mipMapped,GrProtected isProtected,const void * data,size_t dataSize)1124 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1125                                                     const GrBackendFormat& format,
1126                                                     SkBudgeted budgeted,
1127                                                     GrMipmapped mipMapped,
1128                                                     GrProtected isProtected,
1129                                                     const void* data, size_t dataSize) {
1130     VkFormat pixelFormat;
1131     SkAssertResult(format.asVkFormat(&pixelFormat));
1132     SkASSERT(GrVkFormatIsCompressed(pixelFormat));
1133 
1134     VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
1135 
1136     // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
1137     // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
1138     // will be using this texture in some copy or not. Also this assumes, as is the current case,
1139     // that all render targets in vulkan are also textures. If we change this practice of setting
1140     // both bits, we must make sure to set the destination bit if we are uploading srcData to the
1141     // texture.
1142     usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1143 
1144     int numMipLevels = 1;
1145     if (mipMapped == GrMipmapped::kYes) {
1146         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1147     }
1148 
1149     // Compressed textures with MIP levels or multiple samples are not supported as of now.
1150     GrVkImage::ImageDesc imageDesc;
1151     imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1152     imageDesc.fFormat = pixelFormat;
1153     imageDesc.fWidth = dimensions.width();
1154     imageDesc.fHeight = dimensions.height();
1155     imageDesc.fLevels = numMipLevels;
1156     imageDesc.fSamples = 1;
1157     imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1158     imageDesc.fUsageFlags = usageFlags;
1159     imageDesc.fIsProtected = isProtected;
1160 
1161     GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
1162                                                                    : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, imageDesc, mipmapStatus);
    if (!tex) {
        return nullptr;
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex.get(), compression, pixelFormat, dimensions, mipMapped,
                                       data, dataSize)) {
        return nullptr;
    }

    return std::move(tex);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                         VkDeviceSize dstOffset, VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;
    this->currentCommandBuffer()->copyBuffer(this, srcBuffer, dstBuffer, 1, &copyRegion);
}

bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    // Record an inline buffer update. Note that Vulkan limits vkCmdUpdateBuffer to updates of
    // at most 65536 bytes whose offset and size are 4-byte aligned.
    this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != graphicsQueueIndex) {
                return false;
            }
        } else {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }

    // We currently require everything to be made with transfer bits set
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
        !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        return false;
    }

    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else {
        SkASSERT(info.fImageTiling == VK_IMAGE_TILING_LINEAR);
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    }

    // We currently require all textures to be made with sample support
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
        return false;
    }

    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
    if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
        return false;
    }
    if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
    // the wrapped VkImage.
    bool resolveOnly = sampleCnt > 1;
    if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

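    // Adjust the requested sample count to one the format actually supports; the format's
    // renderability was already validated above by check_rt_image_info().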
    sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
                                                                   sampleCnt, ownership, cacheable,
                                                                   imageInfo,
                                                                   std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    if (!backendRT.getVkImageInfo(&info)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
        return nullptr;
    }

    // We will always render directly to this VkImage.
    static constexpr bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }

    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
    SkASSERT(mutableState);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
            this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));

    // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment());
    }

    return std::move(tgt);
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}

bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex);
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture\n");
        return false;
    }
    SkASSERT(tex->textureType() == GrTextureType::k2D);

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // Change the layout of all the levels so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

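    // Walk down the mip chain: each iteration first transitions level (mipLevel - 1) from
    // TRANSFER_DST to TRANSFER_SRC via the barrier set up above, then blits that level into
    // level mipLevel with linear filtering to produce the downsampled contents.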
    // Blit the miplevels
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        this->currentCommandBuffer()->blitImage(this,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                1,
                                                &blitRegion,
                                                VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per level, which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

sk_sp<GrAttachment> GrVkGpu::makeStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                  SkISize dimensions,
                                                                  int numStencilSamples) {
    SkASSERT(numStencilSamples == rt->numSamples() || this->caps()->mixedSamplesSupport());
    SkASSERT(dimensions.width() >= rt->width());
    SkASSERT(dimensions.height() >= rt->height());

    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected);
}

////////////////////////////////////////////////////////////////////////////////

static bool copy_src_data(char* mapPtr, VkFormat vkFormat,
                          const SkTArray<size_t>& individualMipOffsets,
                          const SkPixmap srcData[], int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!GrVkFormatIsCompressed(vkFormat));
    SkASSERT(individualMipOffsets.count() == numMipLevels);
    SkASSERT(mapPtr);

    size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);

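    // Copy each level into the staging buffer at its precomputed offset, tightly packed:
    // trimRB is the minimal row stride, so any row padding in the source pixmap is dropped.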
    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}

static bool copy_compressed_data(GrVkGpu* gpu, char* mapPtr,
                                 const void* rawData, size_t dataSize) {
    SkASSERT(mapPtr);
    memcpy(mapPtr, rawData, dataSize);
    return true;
}

static bool generate_compressed_data(GrVkGpu* gpu, char* mapPtr,
                                     SkImage::CompressionType compression, SkISize dimensions,
                                     GrMipmapped mipMapped, const SkColor4f& color) {
    SkASSERT(mapPtr);
    GrFillInCompressedData(compression, dimensions, mipMapped, mapPtr, color);

    return true;
}

bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
    if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
        return false;
    }

    if (renderable == GrRenderable::kYes) {
        sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
        if (!sampleCnt) {
            return false;
        }
    }

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable == GrTexturable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    return true;
}

bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<GrRefCntedCallback> finishedCallback,
                                     const BackendTextureData* data) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture =
            GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
                                            kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
                                            kRW_GrIOType, info, std::move(mutableState));
    if (!texture) {
        return false;
    }

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }

    texture->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // Unfortunately, vkCmdClearColorImage doesn't work for compressed formats
    bool fastPath = data->type() == BackendTextureData::Type::kColor &&
                    !GrVkFormatIsCompressed(info.fFormat);

    if (fastPath) {
        SkASSERT(data->type() == BackendTextureData::Type::kColor);
        VkClearColorValue vkColor;
        SkColor4f color = data->color();
        // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
        // uint32 union members in those cases.
        vkColor.float32[0] = color.fR;
        vkColor.float32[1] = color.fG;
        vkColor.float32[2] = color.fB;
        vkColor.float32[3] = color.fA;
        VkImageSubresourceRange range;
        range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        range.baseArrayLayer = 0;
        range.baseMipLevel = 0;
        range.layerCount = 1;
        range.levelCount = info.fLevelCount;
        cmdBuffer->clearColorImage(this, texture.get(), &vkColor, 1, &range);
    } else {
        SkImage::CompressionType compression = GrBackendFormatToCompressionType(
                backendTexture.getBackendFormat());

        SkTArray<VkBufferImageCopy> regions;
        SkTArray<size_t> individualMipOffsets;
        GrStagingBufferManager::Slice slice;

        fill_in_regions(&fStagingBufferManager, &regions, &individualMipOffsets,
                        &slice, compression, info.fFormat, backendTexture.dimensions(),
                        backendTexture.fMipmapped);

        if (!slice.fBuffer) {
            return false;
        }

        bool result;
        if (data->type() == BackendTextureData::Type::kPixmaps) {
            result = copy_src_data((char*)slice.fOffsetMapPtr, info.fFormat, individualMipOffsets,
                                   data->pixmaps(), info.fLevelCount);
        } else if (data->type() == BackendTextureData::Type::kCompressed) {
            result = copy_compressed_data(this, (char*)slice.fOffsetMapPtr,
                                          data->compressedData(), data->compressedSize());
        } else {
            SkASSERT(data->type() == BackendTextureData::Type::kColor);
            result = generate_compressed_data(this, (char*)slice.fOffsetMapPtr, compression,
                                              backendTexture.dimensions(),
                                              backendTexture.fMipmapped, data->color());
        }
        if (!result) {
            return false;
        }

        cmdBuffer->addGrSurface(texture);
        cmdBuffer->copyBufferToImage(this, static_cast<GrVkTransferBuffer*>(slice.fBuffer),
                                     texture.get(), texture->currentLayout(), regions.count(),
                                     regions.begin());
    }

    // Change the image layout to shader read: when this texture is later used as a borrowed
    // texture within Ganesh we require that it already be in that layout.
    texture->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                            VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                            false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipMapped,
                                                 GrProtected isProtected) {
    this->handleDirtyContext();

    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!format.asVkFormat(&vkFormat)) {
        return {};
    }

    // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }

    if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipMapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), info);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
        GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
                                        isProtected);
}

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<GrRefCntedCallback> finishedCallback,
                                               const BackendTextureData* data) {
    return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
}

static void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
                                                    const GrVkSharedImageInfo& newInfo) {
    // The LayoutToPipelineSrcStageFlags and LayoutToSrcAccessMask helpers are normally used to
    // get *source* stage and access flags, but we use them here for the destination flags as
    // well, since we don't know exactly what the client plans to use the image for.
    VkImageLayout newLayout = newInfo.getImageLayout();
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        newLayout = image->currentLayout();
    }
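    // Note: an UNDEFINED newLayout above means the client expressed no preference. We keep the
    // image's current layout instead, since a barrier's newLayout may not legally be UNDEFINED
    // and transitioning through it would allow the contents to be discarded.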
    VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
    uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal to have both the new and old queue be special queue families (i.e. external
        // or foreign).
        return;
    }

    image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
                                       newQueueFamilyIndex);
}

bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
                                     sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                     SkISize dimensions,
                                     const GrVkSharedImageInfo& newInfo,
                                     GrBackendSurfaceMutableState* previousState) {
    sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(
            this, dimensions, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, kRW_GrIOType, info,
            std::move(currentState));
    SkASSERT(texture);
    if (!texture) {
        return false;
    }
    if (previousState) {
        previousState->setVulkanState(texture->currentLayout(),
                                      texture->currentQueueFamilyIndex());
    }
    set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
    return true;
}

bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
                                     const GrBackendSurfaceMutableState& newState,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
                                        newState.fVkState, previousState);
}

bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
                                          const GrBackendSurfaceMutableState& newState,
                                          GrBackendSurfaceMutableState* previousState,
                                          sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState),
                                        backendRenderTarget.dimensions(), newState.fVkState,
                                        previousState);
}

void GrVkGpu::querySampleLocations(GrRenderTarget* renderTarget,
                                   SkTArray<SkPoint>* sampleLocations) {
    // In Vulkan, sampleLocationsSupport() means that the platform uses the standard sample
    // locations defined by the spec.
    SkASSERT(this->caps()->sampleLocationsSupport());
    static constexpr SkPoint kStandardSampleLocations_1[1] = {
        {0.5f, 0.5f}};
    static constexpr SkPoint kStandardSampleLocations_2[2] = {
        {0.75f, 0.75f}, {0.25f, 0.25f}};
    static constexpr SkPoint kStandardSampleLocations_4[4] = {
        {0.375f, 0.125f}, {0.875f, 0.375f}, {0.125f, 0.625f}, {0.625f, 0.875f}};
    static constexpr SkPoint kStandardSampleLocations_8[8] = {
        {0.5625f, 0.3125f}, {0.4375f, 0.6875f}, {0.8125f, 0.5625f}, {0.3125f, 0.1875f},
        {0.1875f, 0.8125f}, {0.0625f, 0.4375f}, {0.6875f, 0.9375f}, {0.9375f, 0.0625f}};
    static constexpr SkPoint kStandardSampleLocations_16[16] = {
        {0.5625f, 0.5625f}, {0.4375f, 0.3125f}, {0.3125f, 0.625f}, {0.75f, 0.4375f},
        {0.1875f, 0.375f}, {0.625f, 0.8125f}, {0.8125f, 0.6875f}, {0.6875f, 0.1875f},
        {0.375f, 0.875f}, {0.5f, 0.0625f}, {0.25f, 0.125f}, {0.125f, 0.75f},
        {0.0f, 0.5f}, {0.9375f, 0.25f}, {0.875f, 0.9375f}, {0.0625f, 0.0f}};

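    // Each entry is a fractional (x, y) position within the pixel, matching the "standard sample
    // locations" tables in the Vulkan specification.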
    int numSamples = renderTarget->numSamples();
    if (1 == numSamples) {
        SkASSERT(this->caps()->mixedSamplesSupport());
        if (auto* stencil = renderTarget->getStencilAttachment()) {
            numSamples = stencil->numSamples();
        }
    }
    SkASSERT(numSamples > 1);
    SkASSERT(!renderTarget->getStencilAttachment() ||
             numSamples == renderTarget->getStencilAttachment()->numSamples());

    switch (numSamples) {
        case 1:
            sampleLocations->push_back_n(1, kStandardSampleLocations_1);
            break;
        case 2:
            sampleLocations->push_back_n(2, kStandardSampleLocations_2);
            break;
        case 4:
            sampleLocations->push_back_n(4, kStandardSampleLocations_4);
            break;
        case 8:
            sampleLocations->push_back_n(8, kStandardSampleLocations_8);
            break;
        case 16:
            sampleLocations->push_back_n(16, kStandardSampleLocations_16);
            break;
        default:
            SK_ABORT("Invalid vulkan sample count.");
            break;
    }
}

void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    VkPipelineStageFlags dstStage;
    VkAccessFlags dstAccess;
    if (barrierType == kBlend_GrXferBarrierType) {
        dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
    } else {
        SkASSERT(barrierType == kTexture_GrXferBarrierType);
        dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
    }
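    // Issue a by-region self-dependency barrier (the layout is left unchanged) so that prior
    // color attachment writes become visible to the chosen consumer: a non-coherent advanced
    // blend or an input attachment read.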
    VkImageMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = dstAccess;
    barrier.oldLayout = vkRT->currentLayout();
    barrier.newLayout = vkRT->currentLayout();
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = vkRT->image();
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, vkRT->mipLevels(), 0, 1};
    this->addImageMemoryBarrier(vkRT->resource(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                dstStage, true, &barrier);
}

void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo info;
    if (tex.getVkImageInfo(&info)) {
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    SkASSERT(!(GrProcessor::CustomFeatures::kSampleLocations & programInfo.requestedFeatures()));

    GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
                                                       &attachmentsDescriptor, &attachmentFlags);

    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
            &attachmentsDescriptor, attachmentFlags, selfDepFlags));
    if (!renderPass) {
        return false;
    }

    Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
                                    desc,
                                    programInfo,
                                    renderPass->vkRenderPass(),
                                    &stat);
    if (!pipelineState) {
        return false;
    }

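    // Report whether this call did any real compilation work: a cache hit means the pipeline
    // already existed, so only a miss counts as a successful precompile.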
    return stat != Stats::ProgramCacheResult::kHit;
}

#if GR_TEST_UTILS
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo backend;
    if (!tex.getVkImageInfo(&backend)) {
        return false;
    }

    if (backend.fImage && backend.fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend.fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                    GrColorType ct,
                                                                    int sampleCnt,
                                                                    GrProtected isProtected) {
    this->handleDirtyContext();

    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }

    VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
                                              GrRenderable::kYes, GrMipmapped::kNo, &info,
                                              isProtected)) {
        return {};
    }
    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
}

void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kVulkan == rt.fBackend);

    GrVkImageInfo info;
    if (rt.getVkImageInfo(&info)) {
        // something in the command buffer may still be using this, so force submit
        SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

void GrVkGpu::testingOnly_flushGpuAndSync() {
    SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
}
#endif

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
                                     VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kBufferMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
                                    VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    // If we are in the middle of destroying or abandoning the context we may hit a release proc
    // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
    // VkImage back to the original queue. In this state we don't submit anymore work and we may not
    // have a current command buffer. Thus we won't do the queue transfer.
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kImageMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrBackendSurfaceMutableState* newState) {
    // Whether or not semaphores were inserted during the flush does not affect what we do here:
    // for each proxy we apply the requested mutable state and prepare the image for present.
    if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
        GrVkImage* image;
        for (GrSurfaceProxy* proxy : proxies) {
            SkASSERT(proxy->isInstantiated());
            if (GrTexture* tex = proxy->peekTexture()) {
                image = static_cast<GrVkTexture*>(tex);
            } else {
                GrRenderTarget* rt = proxy->peekRenderTarget();
                SkASSERT(rt);
                image = static_cast<GrVkRenderTarget*>(rt);
            }
            if (newState) {
                const GrVkSharedImageInfo& newInfo = newState->fVkState;
                set_layout_and_queue_from_mutable_state(this, image, newInfo);
            }
            image->prepareForPresent(this);
        }
    }
}

void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
}

void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
}

void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
}

bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
    if (syncCpu) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

static int get_surface_sample_cnt(GrSurface* surf) {
    if (const GrRenderTarget* rt = surf->asRenderTarget()) {
        return rt->numSamples();
    }
    return 0;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                                     GrVkImage* srcImage, const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

#ifdef SK_DEBUG
    int dstSampleCnt = get_surface_sample_cnt(dst);
    int srcSampleCnt = get_surface_sample_cnt(src);
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat;
    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
    SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                         srcFormat, srcSampleCnt, srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->copyImage(this,
                                            srcImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                            dstImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                            1,
                                            &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}
2200 
copySurfaceAsBlit(GrSurface * dst,GrSurface * src,GrVkImage * dstImage,GrVkImage * srcImage,const SkIRect & srcRect,const SkIPoint & dstPoint)2201 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
2202                                 GrVkImage* srcImage, const SkIRect& srcRect,
2203                                 const SkIPoint& dstPoint) {
2204     if (!this->currentCommandBuffer()) {
2205         return;
2206     }
2207 
2208 #ifdef SK_DEBUG
2209     int dstSampleCnt = get_surface_sample_cnt(dst);
2210     int srcSampleCnt = get_surface_sample_cnt(src);
2211     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2212     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2213     VkFormat dstFormat = dstImage->imageFormat();
2214     VkFormat srcFormat;
2215     SkAssertResult(dst->backendFormat().asVkFormat(&srcFormat));
2216     SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
2217                                           dstHasYcbcr, srcFormat, srcSampleCnt,
2218                                           srcImage->isLinearTiled(), srcHasYcbcr));
2219 
2220 #endif
2221     if (src->isProtected() && !dst->isProtected()) {
2222         SkDebugf("Can't copy from protected memory to non-protected");
2223         return;
2224     }
2225 
2226     dstImage->setImageLayout(this,
2227                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2228                              VK_ACCESS_TRANSFER_WRITE_BIT,
2229                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2230                              false);
2231 
2232     srcImage->setImageLayout(this,
2233                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2234                              VK_ACCESS_TRANSFER_READ_BIT,
2235                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2236                              false);
2237 
2238     // Flip rect if necessary
2239     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
2240                                         srcRect.height());
2241 
2242     VkImageBlit blitRegion;
2243     memset(&blitRegion, 0, sizeof(VkImageBlit));
2244     blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2245     blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2246     blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2247     blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2248     blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2249     blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2250 
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->blitImage(this,
                                            *srcImage,
                                            *dstImage,
                                            1,
                                            &blitRegion,
                                            VK_FILTER_NEAREST); // We never scale, so any filter works here.

    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected.\n");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected.\n");
        return false;
    }

    int dstSampleCnt = get_surface_sample_cnt(dst);
    int srcSampleCnt = get_surface_sample_cnt(src);

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        if (vkRT->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        dstImage = vkRT->colorAttachmentImage();
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        srcImage = vkRT->colorAttachmentImage();
    } else {
        SkASSERT(src->asTexture());
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    }

    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat = srcImage->imageFormat();

    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();

    if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
                                        srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                    srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
                                     dstHasYcbcr, srcFormat, srcSampleCnt,
                                     srcImage->isLinearTiled(), srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                           GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                           size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (surfaceColorType != dstColorType) {
        return false;
    }

    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed, since
        // it would require us to know the VkImage, which we don't have, and to stop and restart
        // the VkRenderPass, which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        image = rt;
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture());
    }

    if (!image) {
        return false;
    }

    // Skia's RGB_888x color type, which we map to the Vulkan R8G8B8_UNORM, expects the data to be
    // 32 bits per pixel, but the Vulkan format is only 24. So we first copy the surface into an
    // R8G8B8A8 image and then do the read pixels from that.
    sk_sp<GrVkTextureRenderTarget> copySurface;
    if (dstColorType == GrColorType::kRGB_888x && image->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        int srcSampleCount = 0;
        if (rt) {
            srcSampleCount = rt->numSamples();
        }
        bool srcHasYcbcr = image->ycbcrConversionInfo().isValid();
        if (!this->vkCaps().canCopyAsBlit(VK_FORMAT_R8G8B8A8_UNORM, 1, false, false,
                                          image->imageFormat(), srcSampleCount,
                                          image->isLinearTiled(), srcHasYcbcr)) {
            return false;
        }

        // Make a new surface that is RGBA to copy the RGB surface into.
        VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                       VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                                       VK_IMAGE_USAGE_SAMPLED_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        GrVkImage::ImageDesc imageDesc;
        imageDesc.fImageType = VK_IMAGE_TYPE_2D;
        imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
        imageDesc.fWidth = width;
        imageDesc.fHeight = height;
        imageDesc.fLevels = 1;
        imageDesc.fSamples = 1;
        imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        imageDesc.fUsageFlags = usageFlags;
        imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

        copySurface = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, SkBudgeted::kYes, {width, height}, 1, imageDesc,
                GrMipmapStatus::kNotAllocated);
        if (!copySurface) {
            return false;
        }

        SkIRect srcRect = SkIRect::MakeXYWH(left, top, width, height);
        SkAssertResult(this->copySurface(copySurface.get(), surface, srcRect, SkIPoint::Make(0,0)));

        top = 0;
        left = 0;
        dstColorType = GrColorType::kRGBA_8888;
        image = copySurface.get();
    }

    // Change the layout of our target so it can be used as a copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
        return false;
    }
    size_t tightRowBytes = bpp * width;

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    VkOffset3D offset = { left, top, 0 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
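    // createBuffer returns a generic GrGpuBuffer; in this backend a kXferGpuToCpu
    // buffer is a host-mappable GrVkTransferBuffer, which is what makes the
    // downcast below safe.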
    auto transferBuffer = sk_sp<GrVkTransferBuffer>(
            static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
                                                                GrGpuBufferType::kXferGpuToCpu,
                                                                kStream_GrAccessPattern)
                                                     .release()));

    // Copy the image to a buffer so we can map it to cpu memory
    region.bufferOffset = transferBuffer->offset();
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    image,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer.get(),
                                                    1,
                                                    &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
    void* mappedMemory = transferBuffer->map();

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);

    transferBuffer->unmap();
    return true;
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity, or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
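// For example, with a granularity of 64x64, x bounds of [70, 130) on a 256-wide
// attachment become [64, 192): fRight rounds up from 130 to 192 and fLeft rounds
// down from 70 to 64.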
void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
                                  const VkExtent2D& granularity, int maxWidth, int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              const VkClearValue* colorClear,
                              GrVkRenderTarget* target, GrSurfaceOrigin origin,
                              const SkIRect& bounds, bool forSecondaryCB) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!target->wrapsSecondaryCommandBuffer());
    auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, target->height(), bounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = renderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds, nativeBounds.asSkIRect(), granularity,
                                     target->width(), target->height());
    } else {
        adjustedBounds = nativeBounds.asSkIRect();
    }

2556 #ifdef SK_DEBUG
2557     uint32_t index;
2558     bool result = renderPass->colorAttachmentIndex(&index);
2559     SkASSERT(result && 0 == index);
2560     result = renderPass->stencilAttachmentIndex(&index);
2561     if (result) {
2562         SkASSERT(1 == index);
2563     }
2564 #endif
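    // clears[] is indexed by attachment slot: 0 is the color clear and 1 the
    // depth/stencil clear, matching the attachment indices asserted above.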
    VkClearValue clears[2];
    clears[0].color = colorClear->color;
    clears[1].depthStencil.depth = 0.0f;
    clears[1].depthStencil.stencil = 0;

    return this->currentCommandBuffer()->beginRenderPass(this, renderPass, clears, target,
                                                         adjustedBounds, forSecondaryCB);
}

void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
                            const SkIRect& bounds) {
    // We had a command buffer when we started the render pass, so we should have one now as well.
    SkASSERT(this->currentCommandBuffer());
    this->currentCommandBuffer()->endRenderPass(this);
    this->didWriteToSurface(target, origin, &bounds);
}

bool GrVkGpu::checkVkResult(VkResult result) {
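    // Record device loss and out-of-memory conditions on the GrVkGpu so later work
    // can be abandoned; any other error simply fails this call.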
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            fDeviceIsLost = true;
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}

void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;
    VkResult result;

    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
    if (result != VK_SUCCESS) {
        return 0;
    }
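    // Submitting zero batches with a fence is legal in Vulkan: the fence signals
    // once all previously submitted work on the queue has completed.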
    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
    if (result != VK_SUCCESS) {
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        return 0;
    }

    static_assert(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result;
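    // A timeout of zero makes this a non-blocking poll: it returns true only if
    // the fence has already signaled.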
    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        GrResourceProvider::SemaphoreWrapType wrapType,
        GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

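    // Signaling is deferred: the semaphore's resource is ref'd onto
    // fSemaphoresToSignal so that it can be attached to the next queue submission.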
    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: should we have a way to notify the caller that this has failed? Currently, if the
    // submit fails (e.g., due to DEVICE_LOST), this will just cause us to fail the next use of
    // the gpu. Eventually we will abandon the whole GPU if this fails.
    this->submitToGpu(false);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This would additionally require thread safety in command buffer submissions to queues
    // in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}