/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkGpuCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkTexelBuffer.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConvertPixels.h"
#include "SkMipMap.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#include "SkSLCompiler.h"

#if !defined(SK_BUILD_FOR_WIN)
#include <unistd.h>
#endif // !defined(SK_BUILD_FOR_WIN)

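// All Vulkan calls below go through the GrVkInterface function table supplied by the backend
// context (this->vkInterface()) rather than through statically linked Vulkan entry points.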
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

#ifdef SK_ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

sk_sp<GrGpu> GrVkGpu::Make(GrBackendContext backendContext, const GrContextOptions& options,
                           GrContext* context) {
    const auto* backend = reinterpret_cast<const GrVkBackendContext*>(backendContext);
    return Make(sk_ref_sp(backend), options, context);
}

sk_sp<GrGpu> GrVkGpu::Make(sk_sp<const GrVkBackendContext> backendContext,
                           const GrContextOptions& options, GrContext* context) {
    if (!backendContext) {
        return nullptr;
    }

    if (!backendContext->fInterface->validate(backendContext->fExtensions)) {
        return nullptr;
    }

    return sk_sp<GrGpu>(new GrVkGpu(context, options, std::move(backendContext)));
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 sk_sp<const GrVkBackendContext> backendCtx)
        : INHERITED(context)
        , fBackendContext(std::move(backendCtx))
        , fDevice(fBackendContext->fDevice)
        , fQueue(fBackendContext->fQueue)
        , fResourceProvider(this)
        , fDisconnected(false) {
#ifdef SK_ENABLE_VK_LAYERS
    fCallback = VK_NULL_HANDLE;
    if (fBackendContext->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(),
                            CreateDebugReportCallbackEXT(fBackendContext->fInstance,
                                                         &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = new SkSL::Compiler();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), fBackendContext->fPhysicalDevice,
                               fBackendContext->fFeatures, fBackendContext->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceProperties(fBackendContext->fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(fBackendContext->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,      // sType
        nullptr,                                         // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
        VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags
        fBackendContext->fGraphicsQueueIndex,            // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);

    // set up our heaps
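    // kSubAlloc_Strategy heaps carve allocations out of larger device-memory blocks of the given
    // size; kSingleAlloc_Strategy heaps (size hint 0) make a dedicated allocation per request.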
    fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
    fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 64*1024*1024));
    fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
    fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 256*1024));
    fHeaps[kTexelBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
}

void GrVkGpu::destroyResources() {
    if (fCurrentCmdBuffer) {
        fCurrentCmdBuffer->end(this);
        fCurrentCmdBuffer->unref(this);
    }

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));

    // On Windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
    // on the command buffers even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on Windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
    if (this->vkCaps().mustSleepOnTearDown()) {
#if defined(SK_BUILD_FOR_WIN)
        Sleep(10); // In milliseconds
#else
        sleep(1);  // In seconds
#endif
    }
#endif

#ifdef SK_DEBUG
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
#endif

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    fCopyManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);

    if (fCmdPool != VK_NULL_HANDLE) {
        VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    }

#ifdef SK_ENABLE_VK_LAYERS
    if (fCallback) {
        VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
    }
#endif
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    delete fCompiler;
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        if (DisconnectType::kCleanup == type) {
            this->destroyResources();
        } else {
            fCurrentCmdBuffer->unrefAndAbandon();
            for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
                fSemaphoresToWaitOn[i]->unrefAndAbandon();
            }
            for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
                fSemaphoresToSignal[i]->unrefAndAbandon();
            }
            fCopyManager.abandonResources();

            // must call this just before we destroy the command pool and VkDevice
            fResourceProvider.abandonResources();
        }
        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
#ifdef SK_ENABLE_VK_LAYERS
        fCallback = VK_NULL_HANDLE;
#endif
        fCurrentCmdBuffer = nullptr;
        fCmdPool = VK_NULL_HANDLE;
        fDisconnected = true;
    }
}

///////////////////////////////////////////////////////////////////////////////

GrGpuRTCommandBuffer* GrVkGpu::createCommandBuffer(
            GrRenderTarget* rt, GrSurfaceOrigin origin,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    return new GrVkGpuRTCommandBuffer(this, rt, origin, colorInfo, stencilInfo);
}

GrGpuTextureCommandBuffer* GrVkGpu::createCommandBuffer(GrTexture* texture,
                                                        GrSurfaceOrigin origin) {
    return new GrVkGpuTextureCommandBuffer(this, texture, origin);
}

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync, fSemaphoresToSignal, fSemaphoresToWaitOn);

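    // submitToQueue has taken over the wait/signal semaphore dependencies, so we can drop the
    // references we hold here.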
    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
                                  const void* data) {
    GrBuffer* buff;
    switch (type) {
        case kVertex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kIndex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kXferCpuToGpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case kXferGpuToCpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        case kTexel_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkTexelBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kDrawIndirect_GrBufferType:
            SK_ABORT("DrawIndirect buffers not supported in Vulkan backend.");
            return nullptr;
        default:
            SK_ABORT("Unknown buffer type.");
            return nullptr;
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, GrSurfaceOrigin dstOrigin, int width,
                                   int height, GrColorType srcColorType,
                                   DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    // We don't want to introduce a sRGB conversion if we trigger a draw.
    auto srcConfigSRGBEncoded = GrPixelConfigIsSRGBEncoded(dstSurface->config());
    if (*drawPreference != kNoDraw_DrawPreference) {
        // We assume the base class has only inserted a draw for sRGB reasons. So the temp surface
        // has the config of the original src data. There is no swizzling nor src config spoofing.
        SkASSERT(tempDrawInfo->fWriteColorType == srcColorType);
        SkASSERT(GrPixelConfigToColorType(tempDrawInfo->fTempSurfaceDesc.fConfig) == srcColorType);
        SkASSERT(tempDrawInfo->fSwizzle == GrSwizzle::RGBA());
        // Don't undo a sRGB conversion introduced by our caller via an intermediate draw.
        srcConfigSRGBEncoded = GrPixelConfigIsSRGBEncoded(tempDrawInfo->fTempSurfaceDesc.fConfig);
    }
    if (GrColorTypeIsAlphaOnly(srcColorType)) {
        srcConfigSRGBEncoded = GrSRGBEncoded::kNo;
    }
    GrRenderTarget* renderTarget = dstSurface->asRenderTarget();

    if (GrPixelConfigToColorType(dstSurface->config()) == srcColorType) {
        // We only support writing pixels to textures. Forcing a draw lets us write to pure RTs.
        if (!dstSurface->asTexture()) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        }
        // If the dst is MSAA, we have to draw, or we'll just be writing to the resolve target.
        if (renderTarget && renderTarget->numColorSamples() > 1) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        }
        return true;
    }

    // Any color type change requires a draw
    ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);

    auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srcConfigSRGBEncoded);
    SkASSERT(srcAsConfig != kUnknown_GrPixelConfig);
    bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcAsConfig) == dstSurface->config();

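    // If the src config can't be textured from but the dst config is its R/B swap, upload the
    // data in the dst config and have the draw swizzle R and B back into place.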
    if (!this->vkCaps().isConfigTexturable(srcAsConfig) && configsAreRBSwaps) {
        tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
        tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
        tempDrawInfo->fWriteColorType = GrPixelConfigToColorType(dstSurface->config());
    }
    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface, GrSurfaceOrigin origin, int left, int top,
                            int width, int height, GrColorType srcColorType,
                            const GrMipLevel texels[], int mipLevelCount) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    bool success = false;
    bool linearTiling = vkTex->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  VK_ACCESS_HOST_WRITE_BIT,
                                  VK_PIPELINE_STAGE_HOST_BIT,
                                  false);
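            // Force the submit so the layout transition recorded above has executed before the
            // host writes into the mapped memory below.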
            this->submitCommandBuffer(kForce_SyncQueue);
        }
        success = this->uploadTexDataLinear(vkTex, origin, left, top, width, height, srcColorType,
                                            texels[0].fPixels, texels[0].fRowBytes);
    } else {
        int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
        if (mipLevelCount > currentMipLevels) {
            if (!vkTex->reallocForMipmap(this, mipLevelCount)) {
                return false;
            }
        }
        success = this->uploadTexDataOptimal(vkTex, origin, left, top, width, height, srcColorType,
                                             texels, mipLevelCount);
    }

    return success;
}

bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
                               GrColorType bufferColorType, GrBuffer* transferBuffer,
                               size_t bufferOffset, size_t rowBytes) {
    // Vulkan only supports 4-byte aligned offsets
    if (SkToBool(bufferOffset & 0x3)) {
        return false;
    }
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
    if (!vkTex) {
        return false;
    }
    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
    if (!vkBuffer) {
        return false;
    }

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
        SkASSERT(bounds.contains(subRect));
    )
    int bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (rowBytes == 0) {
        rowBytes = bpp * width;
    }

    // Set up copy region
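    // Note: bufferRowLength is measured in texels rather than bytes, and a bufferImageHeight of 0
    // means the buffer rows are treated as tightly packed to the copy height.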
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    // Change layout of our target so it can be copied to
    vkTex->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         vkBuffer,
                                         vkTex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         1,
                                         &region);

    vkTex->texturePriv().markMipMapsDirty();
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    SkASSERT(dst);
    SkASSERT(src && src->numColorSamples() > 1 && src->msaaImage());

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
    }

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->msaaImage()->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     VK_ACCESS_TRANSFER_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     false);

    fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
}

void GrVkGpu::internalResolveRenderTarget(GrRenderTarget* target, bool requiresSubmit) {
    if (target->needsResolve()) {
        SkASSERT(target->numColorSamples() > 1);
        GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
        SkASSERT(rt->msaaImage());

        const SkIRect& srcRect = rt->getResolveRect();

        this->resolveImage(target, rt, srcRect, SkIPoint::Make(srcRect.fLeft, srcRect.fTop));

        rt->flagAsResolved();

        if (requiresSubmit) {
            this->submitCommandBuffer(kSkip_SyncQueue);
        }
    }
}

bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, GrSurfaceOrigin texOrigin, int left, int top,
                                  int width, int height, GrColorType dataColorType,
                                  const void* data, size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
        SkASSERT(bounds.contains(subRect));
    )
    int bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = width * bpp;
    if (!rowBytes) {
        rowBytes = trimRowBytes;
    }

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;
    VkResult err;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    int texTop = kBottomLeft_GrSurfaceOrigin == texOrigin ? tex->height() - top - height : top;
    const GrVkAlloc& alloc = tex->alloc();
    VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
    VkDeviceSize offsetDiff = 0;
    VkDeviceSize size = height*layout.rowPitch;
    // For Noncoherent buffers we want to make sure the range that we map, both offset and size,
    // are aligned to the nonCoherentAtomSize limit. We may have to move the initial offset back to
    // meet the alignment requirements. So we track how far we move back and then adjust the mapped
    // ptr back up so that this is opaque to the caller.
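    // (e.g., with nonCoherentAtomSize = 64 and offset = 100: offsetDiff = 36, the map starts at
    // offset 64, and mapPtr is advanced 36 bytes after mapping.)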
    if (SkToBool(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag)) {
        VkDeviceSize alignment = this->physicalDeviceProperties().limits.nonCoherentAtomSize;
        offsetDiff = offset & (alignment - 1);
        offset = offset - offsetDiff;
        // Make size of the map aligned to nonCoherentAtomSize
        size = (size + alignment - 1) & ~(alignment - 1);
    }
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    void* mapPtr;
    err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
    if (err) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offsetDiff;

    if (kBottomLeft_GrSurfaceOrigin == texOrigin) {
        // copy into buffer by rows
        const char* srcRow = reinterpret_cast<const char*>(data);
        char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, trimRowBytes);
            srcRow += rowBytes;
            dstRow -= layout.rowPitch;
        }
    } else {
        SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
                     height);
    }

    GrVkMemory::FlushMappedAlloc(this, alloc, size);
    GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));

    return true;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, GrSurfaceOrigin texOrigin, int left, int top,
                                   int width, int height, GrColorType dataColorType,
                                   const GrMipLevel texels[], int mipLevelCount) {
    SkASSERT(!tex->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));

    if (width == 0 || height == 0) {
        return false;
    }

    SkASSERT(this->caps()->isConfigTexturable(tex->config()));
    int bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    if (mipLevelCount) {
        texelsShallowCopy.reset(mipLevelCount);
        memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
    }

    // Determine whether we need to flip when we copy into the buffer
    bool flipY = (kBottomLeft_GrSurfaceOrigin == texOrigin && mipLevelCount);

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    individualMipOffsets.push_back(0);
    size_t combinedBufferSize = width * bpp * height;
    int currentWidth = width;
    int currentHeight = height;
    if (mipLevelCount > 0 && !texelsShallowCopy[0].fPixels) {
        combinedBufferSize = 0;
    }

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes per pixel of the config is always a
    // power of 2.
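    // Since bpp is a power of two, OR-ing 0x3 with (bpp - 1) yields max(4, bpp) - 1, so every mip
    // offset below gets rounded up to a multiple of both 4 and the pixel size.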
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);

        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            individualMipOffsets.push_back(0);
        }
    }
    if (0 == combinedBufferSize) {
        // We don't actually have any data to upload so just return success
        return true;
    }

    // allocate buffer to hold our mip data
    GrVkTransferBuffer* transferBuffer =
                   GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
    if (!transferBuffer) {
        return false;
    }

    char* buffer = (char*) transferBuffer->map();
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    currentWidth = width;
    currentHeight = height;
    int layerHeight = tex->height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                                    ? texelsShallowCopy[currentMipLevel].fRowBytes
                                    : trimRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            if (flipY) {
                src += (currentHeight - 1) * rowBytes;
                for (int y = 0; y < currentHeight; y++) {
                    memcpy(dst, src, trimRowBytes);
                    src -= rowBytes;
                    dst += trimRowBytes;
                }
            } else {
                SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
            }

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
            region.imageOffset = { left, flipY ? layerHeight - top - currentHeight : top, 0 };
            region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
        }
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);
        layerHeight = currentHeight;
    }

    // no need to flush non-coherent memory, unmap will do that for us
    transferBuffer->unmap();

    // Change layout of our target so it can be copied to
    tex->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT,
                        VK_PIPELINE_STAGE_TRANSFER_BIT,
                        false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer,
                                         tex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         regions.count(),
                                         regions.begin());
    transferBuffer->unref();
    if (1 == mipLevelCount) {
        tex->texturePriv().markMipMapsDirty();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                          const GrMipLevel texels[], int mipLevelCount) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    SkAssertResult(GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat));

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    int mipLevels = !mipLevelCount ? 1 : mipLevelCount;
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    GrMipMapsStatus mipMapsStatus = GrMipMapsStatus::kNotAllocated;
    if (mipLevels > 1) {
        mipMapsStatus = GrMipMapsStatus::kValid;
        for (int i = 0; i < mipLevels; ++i) {
            if (!texels[i].fPixels) {
                mipMapsStatus = GrMipMapsStatus::kDirty;
                break;
            }
        }
    }

    sk_sp<GrVkTexture> tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                    imageDesc,
                                                                    mipMapsStatus);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc,
                                            mipMapsStatus);
    }

    if (!tex) {
        return nullptr;
    }

    auto colorType = GrPixelConfigToColorType(desc.fConfig);
    if (mipLevelCount) {
        if (!this->uploadTexDataOptimal(tex.get(), desc.fOrigin, 0, 0, desc.fWidth, desc.fHeight,
                                        colorType, texels, mipLevelCount)) {
            tex->unref();
            return nullptr;
        }
    }

    if (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) {
        VkClearColorValue zeroClearColor;
        memset(&zeroClearColor, 0, sizeof(zeroClearColor));
        VkImageSubresourceRange range;
        range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        range.baseArrayLayer = 0;
        range.baseMipLevel = 0;
        range.layerCount = 1;
        range.levelCount = 1;
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
        this->currentCommandBuffer()->clearColorImage(this, tex.get(), &zeroClearColor, 1, &range);
    }
    return tex;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                         VkDeviceSize dstOffset, VkDeviceSize size) {
    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;
    fCurrentCmdBuffer->copyBuffer(this, srcBuffer, dstBuffer, 1, &copyRegion);
}

bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    // Update the buffer
    fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

static bool check_backend_texture(const GrBackendTexture& backendTex,
                                  GrPixelConfig config) {
    const GrVkImageInfo* info = backendTex.getVkImageInfo();
    if (!info) {
        return false;
    }

    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
        return false;
    }

    SkASSERT(GrVkFormatPixelConfigPairIsValid(info->fFormat, config));
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership) {
    if (!check_backend_texture(backendTex, backendTex.config())) {
        return nullptr;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kNone_GrSurfaceFlags;
    surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // Not actually used in the following
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = 1;

    return GrVkTexture::MakeWrappedTexture(this, surfDesc, ownership, backendTex.getVkImageInfo());
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership) {
    if (!check_backend_texture(backendTex, backendTex.config())) {
        return nullptr;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin; // Not actually used in the following
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, surfDesc, ownership,
                                                                   backendTex.getVkImageInfo());
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    // Currently the Vulkan backend does not support wrapping of msaa render targets directly. In
    // general this is not an issue since swapchain images in vulkan are never multisampled. Thus if
    // you want a multisampled RT it is best to wrap the swapchain images and then let Skia handle
    // creating and owning the MSAA images.
    if (backendRT.sampleCnt() > 1) {
        return nullptr;
    }

    const GrVkImageInfo* info = backendRT.getVkImageInfo();
    if (!info) {
        return nullptr;
    }
    if (VK_NULL_HANDLE == info->fImage) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fOrigin = kBottomLeft_GrSurfaceOrigin; // Not actually used in the following
    desc.fWidth = backendRT.width();
    desc.fHeight = backendRT.height();
    desc.fConfig = backendRT.config();
    desc.fSampleCnt = 1;

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, info);
    if (tgt && backendRT.stencilBits()) {
        if (!createStencilAttachmentForRenderTarget(tgt.get(), desc.fWidth, desc.fHeight)) {
            return nullptr;
        }
    }
    return tgt;
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
                                                                  int sampleCnt) {
    const GrVkImageInfo* info = tex.getVkImageInfo();
    if (!info) {
        return nullptr;
    }
    if (VK_NULL_HANDLE == info->fImage) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fOrigin = kBottomLeft_GrSurfaceOrigin; // Not actually used in the following
    desc.fWidth = tex.width();
    desc.fHeight = tex.height();
    desc.fConfig = tex.config();
    desc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config());
    if (!desc.fSampleCnt) {
        return nullptr;
    }

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, info);
    return tgt;
}

void GrVkGpu::generateMipmap(GrVkTexture* tex, GrSurfaceOrigin texOrigin) {
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (tex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.configCanBeDstofBlit(tex->config(), false) ||
        !caps.configCanBeSrcofBlit(tex->config(), false) ||
        !caps.mipMapSupport()) {
        return;
    }

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(kSkip_SyncQueue);
    }

    // We may need to resolve the texture first if it is also a render target
    GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(tex->asRenderTarget());
    if (texRT) {
        this->internalResolveRenderTarget(texRT, false);
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    if (levelCount != tex->mipLevels()) {
        const GrVkResource* oldResource = tex->resource();
        oldResource->ref();
        // grab handle to the original image resource
        VkImage oldImage = tex->image();

        // change the original image's layout so we can copy from it
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                            VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

        if (!tex->reallocForMipmap(this, levelCount)) {
            oldResource->unref(this);
            return;
        }
        // change the new image's layout so we can blit to it
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

        // Blit original image to top level of new image
        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { width, height, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };

        fCurrentCmdBuffer->blitImage(this,
                                     oldResource,
                                     oldImage,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     tex->resource(),
                                     tex->image(),
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);

        oldResource->unref(this);
    } else {
        // change layout of the layers so we can write to them.
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
    }

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(tex->imageFormat()));
    VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        VK_ACCESS_TRANSFER_WRITE_BIT,                    // srcAccessMask
        VK_ACCESS_TRANSFER_READ_BIT,                     // dstAccessMask
        VK_IMAGE_LAYOUT_GENERAL,                         // oldLayout
        VK_IMAGE_LAYOUT_GENERAL,                         // newLayout
        VK_QUEUE_FAMILY_IGNORED,                         // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                         // dstQueueFamilyIndex
        tex->image(),                                    // image
        { aspectFlags, 0, 1, 0, 1 }                      // subresourceRange
    };

    // Blit the miplevels
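    // The barrier defined above is re-issued each iteration with an updated baseMipLevel, making
    // the writes to level N-1 visible before that level is read as the blit source for level N.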
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     *tex,
                                     *tex,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size_t bufferOffset,
                       size_t srcRowBytes, size_t dstRowBytes, int h) {
    // For Noncoherent buffers we want to make sure the range that we map, both offset and size,
    // are aligned to the nonCoherentAtomSize limit. We may have to move the initial offset back to
    // meet the alignment requirements. So we track how far we move back and then adjust the mapped
    // ptr back up so that this is opaque to the caller.
    VkDeviceSize mapSize = dstRowBytes * h;
    VkDeviceSize mapOffset = alloc.fOffset + bufferOffset;
    VkDeviceSize offsetDiff = 0;
    if (SkToBool(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag)) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        offsetDiff = mapOffset & (alignment - 1);
        mapOffset = mapOffset - offsetDiff;
        // Make size of the map aligned to nonCoherentAtomSize
        mapSize = (mapSize + alignment - 1) & ~(alignment - 1);
    }
    SkASSERT(mapOffset >= alloc.fOffset);
    SkASSERT(mapSize + mapOffset <= alloc.fOffset + alloc.fSize);
    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(),
                                                            alloc.fMemory,
                                                            mapOffset,
                                                            mapSize,
                                                            0,
                                                            &mapPtr));
    if (err) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offsetDiff;

    if (srcData) {
        // If there is no padding on dst we can do a single memcopy.
        // This assumes the srcData comes in with no padding.
        SkRectMemcpy(mapPtr, static_cast<size_t>(dstRowBytes),
                     srcData, srcRowBytes, srcRowBytes, h);
    } else {
        // If there is no srcData we always copy 0's into the texture so that it is initialized
        // with some data.
        if (srcRowBytes == static_cast<size_t>(dstRowBytes)) {
            memset(mapPtr, 0, srcRowBytes * h);
        } else {
            for (int i = 0; i < h; ++i) {
                memset(mapPtr, 0, srcRowBytes);
                mapPtr = SkTAddOffset<void>(mapPtr, static_cast<size_t>(dstRowBytes));
            }
        }
    }
    GrVkMemory::FlushMappedAlloc(gpu, alloc, mapSize);
    GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    return true;
}

GrBackendTexture GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                          GrPixelConfig config,
                                                          bool isRenderTarget,
                                                          GrMipMapped mipMapped) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return GrBackendTexture(); // invalid
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return GrBackendTexture(); // invalid
    }

    if (isRenderTarget && !fVkCaps->isConfigRenderable(config)) {
        return GrBackendTexture(); // invalid
    }

    // Currently we don't support uploading pixel data when mipped.
    if (srcData && GrMipMapped::kYes == mipMapped) {
        return GrBackendTexture(); // invalid
    }

    if (fVkCaps->isConfigTexturableLinearly(config) &&
        (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false)) &&
        GrMipMapped::kNo == mipMapped) {
        linearTiling = true;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (isRenderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                : VK_IMAGE_LAYOUT_UNDEFINED;
1221 
1222     // Create Image
1223     VkSampleCountFlagBits vkSamples;
1224     if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
1225         return GrBackendTexture(); // invalid
1226     }
1227 
1228     // Figure out the number of mip levels.
1229     uint32_t mipLevels = 1;
1230     if (GrMipMapped::kYes == mipMapped) {
1231         mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
1232     }
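    // For example (illustrative): a 256x256 texture has ComputeLevelCount(256, 256) == 8
    // levels below the base, so mipLevels == 9 (256, 128, 64, ..., 1).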

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        0,                                           // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                            // VkImageType
        pixelFormat,                                 // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 },           // VkExtent3D
        mipLevels,                                   // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageTiling,                                 // VkImageTiling
        usageFlags,                                  // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return GrBackendTexture(); // invalid
    }

    // We need to declare these early so that we can delete them at the end outside the if block.
    GrVkAlloc bufferAlloc;
    VkBuffer buffer = VK_NULL_HANDLE;

    VkResult err;
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        fCmdPool,                                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
    if (err) {
        GrVkMemory::FreeImageMemory(this, false, alloc);
        VK_CALL(DestroyImage(fDevice, image, nullptr));
        return GrBackendTexture(); // invalid
    }

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
    SkASSERT(!err);

    size_t bpp = GrBytesPerPixel(config);
    size_t rowCopyBytes = bpp * w;
    if (linearTiling) {
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;

        VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

        if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
                               static_cast<size_t>(layout.rowPitch), h)) {
            GrVkMemory::FreeImageMemory(this, true, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
            return GrBackendTexture(); // invalid
        }
    } else {
        SkASSERT(w && h);

        SkTArray<size_t> individualMipOffsets(mipLevels);
        individualMipOffsets.push_back(0);
        size_t combinedBufferSize = w * bpp * h;
        int currentWidth = w;
        int currentHeight = h;
        // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the
        // image config. This works under the assumption that the bytes per pixel of a config is
        // always a power of two.
        SkASSERT((bpp & (bpp - 1)) == 0);
        const size_t alignmentMask = 0x3 | (bpp - 1);
        for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
            currentWidth = SkTMax(1, currentWidth/2);
            currentHeight = SkTMax(1, currentHeight/2);

            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
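        // Worked example (illustrative): for a 4-bpp 8x8 image, the base level occupies
        // 8 * 4 * 8 = 256 bytes at offset 0. Level 1 (4x4, 64 bytes) lands at offset 256 (already
        // aligned to alignmentMask = 0x3), level 2 (2x2) at offset 320, and level 3 (1x1) at
        // offset 336, for a combinedBufferSize of 340 bytes.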

        VkBufferCreateInfo bufInfo;
        memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
        bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        bufInfo.flags = 0;
        bufInfo.size = combinedBufferSize;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        bufInfo.queueFamilyIndexCount = 0;
        bufInfo.pQueueFamilyIndices = nullptr;
        err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));

        if (err) {
            GrVkMemory::FreeImageMemory(this, false, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
            return GrBackendTexture(); // invalid
        }

        if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
                                                  true, &bufferAlloc)) {
            GrVkMemory::FreeImageMemory(this, false, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
            return GrBackendTexture(); // invalid
        }

        currentWidth = w;
        currentHeight = h;
        for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
            SkASSERT(0 == currentMipLevel || !srcData);
            size_t currentRowBytes = bpp * currentWidth;
            size_t bufferOffset = individualMipOffsets[currentMipLevel];
            if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
                                   currentRowBytes, currentRowBytes, currentHeight)) {
                GrVkMemory::FreeImageMemory(this, false, alloc);
                VK_CALL(DestroyImage(fDevice, image, nullptr));
                GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
                VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
                VK_CALL(EndCommandBuffer(cmdBuffer));
                VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
                return GrBackendTexture(); // invalid
            }
            currentWidth = SkTMax(1, currentWidth/2);
            currentHeight = SkTMax(1, currentHeight/2);
        }

        // Set image layout and add barrier
        VkImageMemoryBarrier barrier;
        memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.pNext = nullptr;
        barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        barrier.oldLayout = initialLayout;
        barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1 };

        VK_CALL(CmdPipelineBarrier(cmdBuffer,
                                   GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   0,
                                   0, nullptr,
                                   0, nullptr,
                                   1, &barrier));
        initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

        SkTArray<VkBufferImageCopy> regions(mipLevels);

        currentWidth = w;
        currentHeight = h;
        for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
            // Submit copy command
            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            // Each region must target its own mip level, not level 0.
            region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1 };
            region.imageOffset = { 0, 0, 0 };
            region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
            currentWidth = SkTMax(1, currentWidth/2);
            currentHeight = SkTMax(1, currentHeight/2);
        }
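        // Note: bufferRowLength == currentWidth (and bufferImageHeight == currentHeight) marks
        // each level's buffer data as tightly packed; passing 0 for both would tell Vulkan the
        // same thing, since 0 means "derive from imageExtent".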

        VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
                                     regions.begin()));
    }
    // Change the image layout to shader read since if we use this texture as a borrowed texture
    // within Ganesh we require that its layout be set to that.
    VkImageMemoryBarrier barrier;
    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.oldLayout = initialLayout;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1 };

    VK_CALL(CmdPipelineBarrier(cmdBuffer,
                               GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
                               VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                               0,
                               0, nullptr,
                               0, nullptr,
                               1, &barrier));

    // End CommandBuffer
    err = VK_CALL(EndCommandBuffer(cmdBuffer));
    SkASSERT(!err);

    // Create Fence for queue
    VkFence fence;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

    err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
    SkASSERT(!err);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = nullptr;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdBuffer;
    submitInfo.signalSemaphoreCount = 0;
    submitInfo.pSignalSemaphores = nullptr;
    err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
    SkASSERT(!err);

    err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
    if (VK_TIMEOUT == err) {
        GrVkMemory::FreeImageMemory(this, false, alloc);
        VK_CALL(DestroyImage(fDevice, image, nullptr));
        GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
        VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
        VK_CALL(DestroyFence(fDevice, fence, nullptr));
        SkDebugf("Fence failed to signal: %d\n", err);
        SK_ABORT("failing");
    }
    SkASSERT(!err);

    // Clean up transfer resources
    if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
        GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
        VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
    }
    VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
    VK_CALL(DestroyFence(fDevice, fence, nullptr));

    GrVkImageInfo info;
    info.fImage = image;
    info.fAlloc = alloc;
    info.fImageTiling = imageTiling;
    info.fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    info.fFormat = pixelFormat;
    info.fLevelCount = mipLevels;

    return GrBackendTexture(w, h, info);
}
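// A minimal usage sketch (illustrative; assumes a valid GrVkGpu* gpu and a 256x256 RGBA pixel
// buffer named `pixels`):
//
//     GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
//             pixels, 256, 256, kRGBA_8888_GrPixelConfig,
//             /*isRenderTarget=*/false, GrMipMapped::kNo);
//
// The returned texture is left in VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL and should eventually
// be released via deleteTestingOnlyBackendTexture().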

bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(kVulkan_GrBackend == tex.fBackend);

    const GrVkImageInfo* backend = tex.getVkImageInfo();

    if (backend && backend->fImage && backend->fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendTexture* tex, bool abandon) {
    SkASSERT(kVulkan_GrBackend == tex->fBackend);

    const GrVkImageInfo* info = tex->getVkImageInfo();

    if (info && !abandon) {
        // Something in the command buffer may still be using this, so force a submit.
        this->submitCommandBuffer(kForce_SyncQueue);
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(info));
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::onFinishFlush(bool insertedSemaphore) {
    // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
    // not affect what we do here.
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target, int clearValue) {
    if (!target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    vkStencilColor.depth = 0.0f;
    vkStencilColor.stencil = clearValue;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              VK_ACCESS_TRANSFER_WRITE_BIT,
                              VK_PIPELINE_STAGE_TRANSFER_BIT,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst, GrSurfaceOrigin dstOrigin,
                           const GrSurface* src, GrSurfaceOrigin srcOrigin,
                           const GrVkGpu* gpu) {
    const GrRenderTarget* dstRT = dst->asRenderTarget();
    const GrRenderTarget* srcRT = src->asRenderTarget();
    if (dstRT && srcRT) {
        if (srcRT->numColorSamples() != dstRT->numColorSamples()) {
            return false;
        }
    } else if (dstRT) {
        if (dstRT->numColorSamples() > 1) {
            return false;
        }
    } else if (srcRT) {
        if (srcRT->numColorSamples() > 1) {
            return false;
        }
    }

    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    if (srcOrigin == dstOrigin &&
        GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
        return true;
    }

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                     GrSurface* src, GrSurfaceOrigin srcOrigin,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, dstOrigin, src, srcOrigin, this));

    // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dstOrigin);
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, dstOrigin, &dstRect);
}

inline bool can_copy_as_blit(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkImage* dstImage,
                             const GrVkImage* srcImage,
                             const GrVkGpu* gpu) {
    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    const GrVkCaps& caps = gpu->vkCaps();
    if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
        !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
        return false;
    }

    // We cannot blit images that are multisampled. Will need to figure out if we can blit the
    // resolved msaa though.
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct orientation of the copied data.
    if (srcOrigin != dstOrigin) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }
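    // Worked example (illustrative): copying a 10-pixel-tall rect from a top-left-origin src to a
    // bottom-left-origin dst of height 100 at dstPoint.fY == 20 gives dstRect.fTop == 70 and
    // dstRect.fBottom == 80; swapping them makes vkCmdBlitImage write the rows inverted, which is
    // exactly what the origin change requires.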

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST); // We never scale so any filter works here

    this->didWriteToSurface(dst, dstOrigin, &dstRect);
}

inline bool can_copy_as_resolve(const GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                const GrSurface* src, GrSurfaceOrigin srcOrigin,
                                const GrVkGpu* gpu) {
    // Our src must be a multisampled render target.
    if (!src->asRenderTarget() || 1 == src->asRenderTarget()->numColorSamples()) {
        return false;
    }

    // The dst must not be a multisampled render target, except in the case where the dst is the
    // resolve texture connected to the msaa src. We check for this in case we are copying a part
    // of a surface to a different region in the same surface.
    if (dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1 && dst != src) {
        return false;
    }

    // Surfaces must have the same origin.
    if (srcOrigin != dstOrigin) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
                                   GrSurfaceOrigin srcOrigin, const SkIRect& origSrcRect,
                                   const SkIPoint& origDstPoint) {
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    SkIRect srcRect = origSrcRect;
    SkIPoint dstPoint = origDstPoint;
    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dstOrigin);
        srcRect = {origSrcRect.fLeft, src->height() - origSrcRect.fBottom,
                   origSrcRect.fRight, src->height() - origSrcRect.fTop};
        dstPoint.fY = dst->height() - dstPoint.fY - srcRect.height();
    }
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
}

bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                            GrSurface* src, GrSurfaceOrigin srcOrigin,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_as_resolve(dst, dstOrigin, src, srcOrigin, this)) {
        this->copySurfaceAsResolve(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
    }

    if (fCopyManager.copySurfaceAsDraw(this, dst, dstOrigin, src, srcOrigin, srcRect, dstPoint)) {
        auto dstRect = srcRect.makeOffset(dstPoint.fX, dstPoint.fY);
        this->didWriteToSurface(dst, dstOrigin, &dstRect);
        return true;
    }

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        srcImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(src->asTexture());
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    }

    // For borrowed textures, we *only* want to copy using draws (to avoid layout changes).
    if (srcImage->isBorrowed()) {
        return false;
    }

    if (can_copy_image(dst, dstOrigin, src, srcOrigin, this)) {
        this->copySurfaceAsCopyImage(dst, dstOrigin, src, srcOrigin, dstImage, srcImage,
                                     srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, dstOrigin, src, srcOrigin, dstImage, srcImage,
                                srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, GrSurfaceOrigin srcOrigin, int width,
                                  int height, size_t rowBytes, GrColorType dstColorType,
                                  DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // We don't want to introduce a sRGB conversion if we trigger a draw.
    auto dstConfigSRGBEncoded = GrPixelConfigIsSRGBEncoded(srcSurface->config());
    if (*drawPreference != kNoDraw_DrawPreference) {
        // We assume the base class has only inserted a draw for sRGB reasons. So the temp surface
        // has the config of the dst data. There is no swizzling nor dst config spoofing.
        SkASSERT(tempDrawInfo->fReadColorType == dstColorType);
        SkASSERT(GrPixelConfigToColorType(tempDrawInfo->fTempSurfaceDesc.fConfig) == dstColorType);
        SkASSERT(tempDrawInfo->fSwizzle == GrSwizzle::RGBA());
        // Don't undo a sRGB conversion introduced by our caller via an intermediate draw.
        dstConfigSRGBEncoded = GrPixelConfigIsSRGBEncoded(tempDrawInfo->fTempSurfaceDesc.fConfig);
    }
    if (GrColorTypeIsAlphaOnly(dstColorType)) {
        dstConfigSRGBEncoded = GrSRGBEncoded::kNo;
    }

    if (GrPixelConfigToColorType(srcSurface->config()) == dstColorType) {
        return true;
    }

    // Any config change requires a draw.
    ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    tempDrawInfo->fTempSurfaceDesc.fConfig =
            GrColorTypeToPixelConfig(dstColorType, dstConfigSRGBEncoded);
    tempDrawInfo->fReadColorType = dstColorType;

    return kUnknown_GrPixelConfig != tempDrawInfo->fTempSurfaceDesc.fConfig;
}

bool GrVkGpu::onReadPixels(GrSurface* surface, GrSurfaceOrigin origin, int left, int top, int width,
                           int height, GrColorType dstColorType, void* buffer, size_t rowBytes) {
    if (GrPixelConfigToColorType(surface->config()) != dstColorType) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Resolve the render target if necessary.
        switch (rt->getResolveType()) {
            case GrVkRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrVkRenderTarget::kAutoResolves_ResolveType:
                break;
            case GrVkRenderTarget::kCanResolve_ResolveType:
                this->internalResolveRenderTarget(rt, false);
                break;
            default:
                SK_ABORT("Unknown resolve type");
        }
        image = rt;
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture());
    }

    if (!image) {
        return false;
    }

    // Change layout of our target so it can be used as the copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t tightRowBytes = bpp * width;
    bool flipY = kBottomLeft_GrSurfaceOrigin == origin;

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));

    bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
    if (copyFromOrigin) {
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { (uint32_t)(left + width),
                               (uint32_t)(flipY ? surface->height() - top : top + height),
                               1
                             };
    } else {
        VkOffset3D offset = {
            left,
            flipY ? surface->height() - top - height : top,
            0
        };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
    }

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    GrVkTransferBuffer* transferBuffer =
            static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
                                                                kXferGpuToCpu_GrBufferType,
                                                                kStream_GrAccessPattern));
    if (!transferBuffer) {
        return false;
    }

    // Copy the image to a buffer so we can map it to cpu memory.
    region.bufferOffset = transferBuffer->offset();
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         image,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // Make sure the copy to the buffer has finished.
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);
    void* mappedMemory = transferBuffer->map();
    GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());

    if (copyFromOrigin) {
        uint32_t skipRows = region.imageExtent.height - height;
        mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
    }
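    // For example (illustrative): reading a 10-row rect at top == 5 from a top-left-origin
    // surface with mustDoCopiesFromOrigin() forces a copy of rows 0..14, so skipRows == 5 and we
    // step the mapped pointer past 5 full buffer rows (plus bpp * left bytes) to reach the
    // requested pixels.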

    if (flipY) {
        const char* srcRow = reinterpret_cast<const char*>(mappedMemory);
        char* dstRow = reinterpret_cast<char*>(buffer) + (height - 1) * rowBytes;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, tightRowBytes);
            srcRow += transBufferRowBytes;
            dstRow -= rowBytes;
        }
    } else {
        SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);
    }

    transferBuffer->unmap();
    transferBuffer->unref();
    return true;
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
                                  const VkExtent2D& granularity, int maxWidth, int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}
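// Worked example (illustrative): with granularity.width == 32, srcBounds spanning x = [10, 100),
// and maxWidth == 256, rightAdj == 32 - (100 % 32) == 28, so fRight becomes 128; since that is
// within maxWidth, fLeft snaps down to 10 - (10 % 32) == 0. Both edges end up on 32-pixel
// boundaries, as the render-area granularity rules require.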

void GrVkGpu::submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>& buffers,
                                           const GrVkRenderPass* renderPass,
                                           const VkClearValue* colorClear,
                                           GrVkRenderTarget* target, GrSurfaceOrigin origin,
                                           const SkIRect& bounds) {
    const SkIRect* pBounds = &bounds;
    SkIRect flippedBounds;
    if (kBottomLeft_GrSurfaceOrigin == origin) {
        flippedBounds = bounds;
        flippedBounds.fTop = target->height() - bounds.fBottom;
        flippedBounds.fBottom = target->height() - bounds.fTop;
        pBounds = &flippedBounds;
    }

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = renderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds, *pBounds, granularity,
                                     target->width(), target->height());
        pBounds = &adjustedBounds;
    }

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
    VkClearValue clears[2];
    clears[0].color = colorClear->color;
    clears[1].depthStencil.depth = 0.0f;
    clears[1].depthStencil.stencil = 0;

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, clears, *target, *pBounds, true);
    for (int i = 0; i < buffers.count(); ++i) {
        fCurrentCmdBuffer->executeCommands(this, buffers[i]);
    }
    fCurrentCmdBuffer->endRenderPass(this);

    this->didWriteToSurface(target, origin, &bounds);
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;

    VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
    VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));

    GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                 GrResourceProvider::SemaphoreWrapType wrapType,
                                                 GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }

    if (flush) {
        this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                              false);
    this->submitCommandBuffer(kSkip_SyncQueue);

    // The image layout change serves as a barrier, so no semaphore is needed.
    return nullptr;
}