1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/GrVkGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/private/SkTo.h"
14 #include "src/core/SkConvertPixels.h"
15 #include "src/core/SkMipMap.h"
16 #include "src/gpu/GrContextPriv.h"
17 #include "src/gpu/GrDataUtils.h"
18 #include "src/gpu/GrGeometryProcessor.h"
19 #include "src/gpu/GrGpuResourceCacheAccess.h"
20 #include "src/gpu/GrMesh.h"
21 #include "src/gpu/GrNativeRect.h"
22 #include "src/gpu/GrPipeline.h"
23 #include "src/gpu/GrRenderTargetContext.h"
24 #include "src/gpu/GrRenderTargetPriv.h"
25 #include "src/gpu/GrTexturePriv.h"
26 #include "src/gpu/SkGpuDevice.h"
27 #include "src/gpu/SkGr.h"
28 #include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
29 #include "src/gpu/vk/GrVkCommandBuffer.h"
30 #include "src/gpu/vk/GrVkCommandPool.h"
31 #include "src/gpu/vk/GrVkImage.h"
32 #include "src/gpu/vk/GrVkIndexBuffer.h"
33 #include "src/gpu/vk/GrVkInterface.h"
34 #include "src/gpu/vk/GrVkMemory.h"
35 #include "src/gpu/vk/GrVkOpsRenderPass.h"
36 #include "src/gpu/vk/GrVkPipeline.h"
37 #include "src/gpu/vk/GrVkPipelineState.h"
38 #include "src/gpu/vk/GrVkRenderPass.h"
39 #include "src/gpu/vk/GrVkResourceProvider.h"
40 #include "src/gpu/vk/GrVkSemaphore.h"
41 #include "src/gpu/vk/GrVkTexture.h"
42 #include "src/gpu/vk/GrVkTextureRenderTarget.h"
43 #include "src/gpu/vk/GrVkTransferBuffer.h"
44 #include "src/gpu/vk/GrVkVertexBuffer.h"
45 #include "src/image/SkImage_Gpu.h"
46 #include "src/image/SkSurface_Gpu.h"
47 #include "src/sksl/SkSLCompiler.h"
48
49 #include "include/gpu/vk/GrVkExtensions.h"
50 #include "include/gpu/vk/GrVkTypes.h"
51
52 #include <utility>
53
54 #if !defined(SK_BUILD_FOR_WIN)
55 #include <unistd.h>
56 #endif // !defined(SK_BUILD_FOR_WIN)
57
58 #if defined(SK_BUILD_FOR_WIN) && defined(SK_DEBUG)
59 #include "src/core/SkLeanWindows.h"
60 #endif
61
62 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
63 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
64 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
65
Make(const GrVkBackendContext & backendContext,const GrContextOptions & options,GrContext * context)66 sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
67 const GrContextOptions& options, GrContext* context) {
68 if (backendContext.fInstance == VK_NULL_HANDLE ||
69 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
70 backendContext.fDevice == VK_NULL_HANDLE ||
71 backendContext.fQueue == VK_NULL_HANDLE) {
72 return nullptr;
73 }
74 if (!backendContext.fGetProc) {
75 return nullptr;
76 }
77
78 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
79 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
80 backendContext.fGetProc("vkEnumerateInstanceVersion",
81 VK_NULL_HANDLE, VK_NULL_HANDLE));
82 uint32_t instanceVersion = 0;
83 if (!localEnumerateInstanceVersion) {
84 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
85 } else {
86 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
87 if (err) {
88 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
89 return nullptr;
90 }
91 }
92
93 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
94 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
95 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
96 backendContext.fInstance,
97 VK_NULL_HANDLE));
98
99 if (!localGetPhysicalDeviceProperties) {
100 return nullptr;
101 }
102 VkPhysicalDeviceProperties physDeviceProperties;
103 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
104 uint32_t physDevVersion = physDeviceProperties.apiVersion;
105
106 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
107 : instanceVersion;
108
109 instanceVersion = SkTMin(instanceVersion, apiVersion);
110 physDevVersion = SkTMin(physDevVersion, apiVersion);
111
112 sk_sp<const GrVkInterface> interface;
113
114 if (backendContext.fVkExtensions) {
115 interface.reset(new GrVkInterface(backendContext.fGetProc,
116 backendContext.fInstance,
117 backendContext.fDevice,
118 instanceVersion,
119 physDevVersion,
120 backendContext.fVkExtensions));
121 if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
122 return nullptr;
123 }
124 } else {
125 GrVkExtensions extensions;
126 // The only extension flag that may effect the vulkan backend is the swapchain extension. We
127 // need to know if this is enabled to know if we can transition to a present layout when
128 // flushing a surface.
129 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
130 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
131 extensions.init(backendContext.fGetProc, backendContext.fInstance,
132 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
133 }
134 interface.reset(new GrVkInterface(backendContext.fGetProc,
135 backendContext.fInstance,
136 backendContext.fDevice,
137 instanceVersion,
138 physDevVersion,
139 &extensions));
140 if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
141 return nullptr;
142 }
143 }
144
145 sk_sp<GrVkGpu> vkGpu(new GrVkGpu(context, options, backendContext, interface,
146 instanceVersion, physDevVersion));
147 if (backendContext.fProtectedContext == GrProtected::kYes &&
148 !vkGpu->vkCaps().supportsProtectedMemory()) {
149 return nullptr;
150 }
151 return vkGpu;
152 }
153
154 ////////////////////////////////////////////////////////////////////////////////
155
GrVkGpu(GrContext * context,const GrContextOptions & options,const GrVkBackendContext & backendContext,sk_sp<const GrVkInterface> interface,uint32_t instanceVersion,uint32_t physicalDeviceVersion)156 GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
157 const GrVkBackendContext& backendContext, sk_sp<const GrVkInterface> interface,
158 uint32_t instanceVersion, uint32_t physicalDeviceVersion)
159 : INHERITED(context)
160 , fInterface(std::move(interface))
161 , fMemoryAllocator(backendContext.fMemoryAllocator)
162 , fInstance(backendContext.fInstance)
163 , fPhysicalDevice(backendContext.fPhysicalDevice)
164 , fDevice(backendContext.fDevice)
165 , fQueue(backendContext.fQueue)
166 , fQueueIndex(backendContext.fGraphicsQueueIndex)
167 , fResourceProvider(this)
168 , fDisconnected(false)
169 , fProtectedContext(backendContext.fProtectedContext) {
170 SkASSERT(!backendContext.fOwnsInstanceAndDevice);
171
172 if (!fMemoryAllocator) {
173 // We were not given a memory allocator at creation
174 fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice,
175 fDevice, fInterface));
176 }
177
178 fCompiler = new SkSL::Compiler();
179
180 if (backendContext.fDeviceFeatures2) {
181 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
182 *backendContext.fDeviceFeatures2, instanceVersion,
183 physicalDeviceVersion,
184 *backendContext.fVkExtensions, fProtectedContext));
185 } else if (backendContext.fDeviceFeatures) {
186 VkPhysicalDeviceFeatures2 features2;
187 features2.pNext = nullptr;
188 features2.features = *backendContext.fDeviceFeatures;
189 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
190 features2, instanceVersion, physicalDeviceVersion,
191 *backendContext.fVkExtensions, fProtectedContext));
192 } else {
193 VkPhysicalDeviceFeatures2 features;
194 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
195 features.pNext = nullptr;
196 if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
197 features.features.geometryShader = true;
198 }
199 if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
200 features.features.dualSrcBlend = true;
201 }
202 if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
203 features.features.sampleRateShading = true;
204 }
205 GrVkExtensions extensions;
206 // The only extension flag that may effect the vulkan backend is the swapchain extension. We
207 // need to know if this is enabled to know if we can transition to a present layout when
208 // flushing a surface.
209 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
210 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
211 extensions.init(backendContext.fGetProc, backendContext.fInstance,
212 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
213 }
214 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
215 features, instanceVersion, physicalDeviceVersion, extensions,
216 fProtectedContext));
217 }
218 fCaps.reset(SkRef(fVkCaps.get()));
219
220 VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
221 VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
222
223 fResourceProvider.init();
224
225 fCmdPool = fResourceProvider.findOrCreateCommandPool();
226 fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
227 SkASSERT(fCurrentCmdBuffer);
228 fCurrentCmdBuffer->begin(this);
229 }
230
destroyResources()231 void GrVkGpu::destroyResources() {
232 if (fCmdPool) {
233 fCmdPool->getPrimaryCommandBuffer()->end(this);
234 fCmdPool->close();
235 }
236
237 // wait for all commands to finish
238 VkResult res = VK_CALL(QueueWaitIdle(fQueue));
239
240 // On windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
241 // on the command buffers even though they have completed. This causes an assert to fire when
242 // destroying the command buffers. Currently this ony seems to happen on windows, so we add a
243 // sleep to make sure the fence signals.
244 #ifdef SK_DEBUG
245 if (this->vkCaps().mustSleepOnTearDown()) {
246 #if defined(SK_BUILD_FOR_WIN)
247 Sleep(10); // In milliseconds
248 #else
249 sleep(1); // In seconds
250 #endif
251 }
252 #endif
253
254 #ifdef SK_DEBUG
255 SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
256 #endif
257
258 if (fCmdPool) {
259 fCmdPool->unref(this);
260 fCmdPool = nullptr;
261 }
262
263 for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
264 fSemaphoresToWaitOn[i]->unref(this);
265 }
266 fSemaphoresToWaitOn.reset();
267
268 for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
269 fSemaphoresToSignal[i]->unref(this);
270 }
271 fSemaphoresToSignal.reset();
272
273 // must call this just before we destroy the command pool and VkDevice
274 fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);
275
276 fMemoryAllocator.reset();
277
278 fQueue = VK_NULL_HANDLE;
279 fDevice = VK_NULL_HANDLE;
280 fInstance = VK_NULL_HANDLE;
281 }
282
~GrVkGpu()283 GrVkGpu::~GrVkGpu() {
284 if (!fDisconnected) {
285 this->destroyResources();
286 }
287 delete fCompiler;
288 }
289
290
disconnect(DisconnectType type)291 void GrVkGpu::disconnect(DisconnectType type) {
292 INHERITED::disconnect(type);
293 if (!fDisconnected) {
294 if (DisconnectType::kCleanup == type) {
295 this->destroyResources();
296 } else {
297 if (fCmdPool) {
298 fCmdPool->unrefAndAbandon();
299 fCmdPool = nullptr;
300 }
301 for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
302 fSemaphoresToWaitOn[i]->unrefAndAbandon();
303 }
304 for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
305 fSemaphoresToSignal[i]->unrefAndAbandon();
306 }
307
308 // must call this just before we destroy the command pool and VkDevice
309 fResourceProvider.abandonResources();
310
311 fMemoryAllocator.reset();
312 }
313 fSemaphoresToWaitOn.reset();
314 fSemaphoresToSignal.reset();
315 fCurrentCmdBuffer = nullptr;
316 fDisconnected = true;
317 }
318 }
319
320 ///////////////////////////////////////////////////////////////////////////////
321
getOpsRenderPass(GrRenderTarget * rt,GrSurfaceOrigin origin,const SkIRect & bounds,const GrOpsRenderPass::LoadAndStoreInfo & colorInfo,const GrOpsRenderPass::StencilLoadAndStoreInfo & stencilInfo,const SkTArray<GrTextureProxy *,true> & sampledProxies)322 GrOpsRenderPass* GrVkGpu::getOpsRenderPass(
323 GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
324 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
325 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
326 const SkTArray<GrTextureProxy*, true>& sampledProxies) {
327 if (!fCachedOpsRenderPass) {
328 fCachedOpsRenderPass.reset(new GrVkOpsRenderPass(this));
329 }
330
331 fCachedOpsRenderPass->set(rt, origin, bounds, colorInfo, stencilInfo, sampledProxies);
332 return fCachedOpsRenderPass.get();
333 }
334
submitCommandBuffer(SyncQueue sync,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)335 void GrVkGpu::submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc,
336 GrGpuFinishedContext finishedContext) {
337 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
338 SkASSERT(fCurrentCmdBuffer);
339 SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
340
341 if (!fCurrentCmdBuffer->hasWork() && kForce_SyncQueue != sync &&
342 !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
343 SkASSERT(fDrawables.empty());
344 fResourceProvider.checkCommandBuffers();
345 if (finishedProc) {
346 fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
347 }
348 return;
349 }
350
351 fCurrentCmdBuffer->end(this);
352 fCmdPool->close();
353 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync, fSemaphoresToSignal, fSemaphoresToWaitOn);
354
355 if (finishedProc) {
356 // Make sure this is called after closing the current command pool
357 fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
358 }
359
360 // We must delete and drawables that have been waitint till submit for us to destroy.
361 fDrawables.reset();
362
363 for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
364 fSemaphoresToWaitOn[i]->unref(this);
365 }
366 fSemaphoresToWaitOn.reset();
367 for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
368 fSemaphoresToSignal[i]->unref(this);
369 }
370 fSemaphoresToSignal.reset();
371
372 // Release old command pool and create a new one
373 fCmdPool->unref(this);
374 fResourceProvider.checkCommandBuffers();
375 fCmdPool = fResourceProvider.findOrCreateCommandPool();
376 fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
377 fCurrentCmdBuffer->begin(this);
378 }
379
380 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)381 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
382 GrAccessPattern accessPattern, const void* data) {
383 sk_sp<GrGpuBuffer> buff;
384 switch (type) {
385 case GrGpuBufferType::kVertex:
386 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
387 kStatic_GrAccessPattern == accessPattern);
388 buff = GrVkVertexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
389 break;
390 case GrGpuBufferType::kIndex:
391 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
392 kStatic_GrAccessPattern == accessPattern);
393 buff = GrVkIndexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
394 break;
395 case GrGpuBufferType::kXferCpuToGpu:
396 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
397 kStream_GrAccessPattern == accessPattern);
398 buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type);
399 break;
400 case GrGpuBufferType::kXferGpuToCpu:
401 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
402 kStream_GrAccessPattern == accessPattern);
403 buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type);
404 break;
405 default:
406 SK_ABORT("Unknown buffer type.");
407 }
408 if (data && buff) {
409 buff->updateData(data, size);
410 }
411 return buff;
412 }
413
onWritePixels(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)414 bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
415 GrColorType surfaceColorType, GrColorType srcColorType,
416 const GrMipLevel texels[], int mipLevelCount,
417 bool prepForTexSampling) {
418 GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
419 if (!vkTex) {
420 return false;
421 }
422
423 // Make sure we have at least the base level
424 if (!mipLevelCount || !texels[0].fPixels) {
425 return false;
426 }
427
428 SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
429 bool success = false;
430 bool linearTiling = vkTex->isLinearTiled();
431 if (linearTiling) {
432 if (mipLevelCount > 1) {
433 SkDebugf("Can't upload mipmap data to linear tiled texture");
434 return false;
435 }
436 if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
437 // Need to change the layout to general in order to perform a host write
438 vkTex->setImageLayout(this,
439 VK_IMAGE_LAYOUT_GENERAL,
440 VK_ACCESS_HOST_WRITE_BIT,
441 VK_PIPELINE_STAGE_HOST_BIT,
442 false);
443 this->submitCommandBuffer(kForce_SyncQueue);
444 }
445 success = this->uploadTexDataLinear(vkTex, left, top, width, height, srcColorType,
446 texels[0].fPixels, texels[0].fRowBytes);
447 } else {
448 SkASSERT(mipLevelCount <= vkTex->texturePriv().maxMipMapLevel() + 1);
449 success = this->uploadTexDataOptimal(vkTex, left, top, width, height, srcColorType, texels,
450 mipLevelCount);
451 }
452
453 if (prepForTexSampling) {
454 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
455 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
456 false);
457 }
458
459 return success;
460 }
461
onTransferPixelsTo(GrTexture * texture,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType bufferColorType,GrGpuBuffer * transferBuffer,size_t bufferOffset,size_t rowBytes)462 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
463 GrColorType surfaceColorType, GrColorType bufferColorType,
464 GrGpuBuffer* transferBuffer, size_t bufferOffset,
465 size_t rowBytes) {
466 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
467 if ((bufferOffset & 0x3) || (bufferOffset % GrColorTypeBytesPerPixel(bufferColorType))) {
468 return false;
469 }
470 GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
471 if (!vkTex) {
472 return false;
473 }
474
475 // Can't transfer compressed data
476 SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
477
478 GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
479 if (!vkBuffer) {
480 return false;
481 }
482
483 SkDEBUGCODE(
484 SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
485 SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
486 SkASSERT(bounds.contains(subRect));
487 )
488 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
489
490 // Set up copy region
491 VkBufferImageCopy region;
492 memset(®ion, 0, sizeof(VkBufferImageCopy));
493 region.bufferOffset = bufferOffset;
494 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
495 region.bufferImageHeight = 0;
496 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
497 region.imageOffset = { left, top, 0 };
498 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
499
500 // Change layout of our target so it can be copied to
501 vkTex->setImageLayout(this,
502 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
503 VK_ACCESS_TRANSFER_WRITE_BIT,
504 VK_PIPELINE_STAGE_TRANSFER_BIT,
505 false);
506
507 // Copy the buffer to the image
508 fCurrentCmdBuffer->copyBufferToImage(this,
509 vkBuffer,
510 vkTex,
511 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
512 1,
513 ®ion);
514
515 vkTex->texturePriv().markMipMapsDirty();
516 return true;
517 }
518
onTransferPixelsFrom(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType bufferColorType,GrGpuBuffer * transferBuffer,size_t offset)519 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
520 GrColorType surfaceColorType, GrColorType bufferColorType,
521 GrGpuBuffer* transferBuffer, size_t offset) {
522 SkASSERT(surface);
523 SkASSERT(transferBuffer);
524 if (fProtectedContext == GrProtected::kYes) {
525 return false;
526 }
527
528 GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
529
530 GrVkImage* srcImage;
531 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
532 // Reading from render targets that wrap a secondary command buffer is not allowed since
533 // it would require us to know the VkImage, which we don't have, as well as need us to
534 // stop and start the VkRenderPass which we don't have access to.
535 if (rt->wrapsSecondaryCommandBuffer()) {
536 return false;
537 }
538 srcImage = rt;
539 } else {
540 srcImage = static_cast<GrVkTexture*>(surface->asTexture());
541 }
542
543 // Set up copy region
544 VkBufferImageCopy region;
545 memset(®ion, 0, sizeof(VkBufferImageCopy));
546 region.bufferOffset = offset;
547 region.bufferRowLength = width;
548 region.bufferImageHeight = 0;
549 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
550 region.imageOffset = { left, top, 0 };
551 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
552
553 srcImage->setImageLayout(this,
554 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
555 VK_ACCESS_TRANSFER_READ_BIT,
556 VK_PIPELINE_STAGE_TRANSFER_BIT,
557 false);
558
559 fCurrentCmdBuffer->copyImageToBuffer(this, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
560 vkBuffer, 1, ®ion);
561
562 // Make sure the copy to buffer has finished.
563 vkBuffer->addMemoryBarrier(this,
564 VK_ACCESS_TRANSFER_WRITE_BIT,
565 VK_ACCESS_HOST_READ_BIT,
566 VK_PIPELINE_STAGE_TRANSFER_BIT,
567 VK_PIPELINE_STAGE_HOST_BIT,
568 false);
569 return true;
570 }
571
resolveImage(GrSurface * dst,GrVkRenderTarget * src,const SkIRect & srcRect,const SkIPoint & dstPoint)572 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
573 const SkIPoint& dstPoint) {
574 SkASSERT(dst);
575 SkASSERT(src && src->numSamples() > 1 && src->msaaImage());
576
577 VkImageResolve resolveInfo;
578 resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
579 resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
580 resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
581 resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
582 resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
583
584 GrVkImage* dstImage;
585 GrRenderTarget* dstRT = dst->asRenderTarget();
586 if (dstRT) {
587 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
588 dstImage = vkRT;
589 } else {
590 SkASSERT(dst->asTexture());
591 dstImage = static_cast<GrVkTexture*>(dst->asTexture());
592 }
593 dstImage->setImageLayout(this,
594 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
595 VK_ACCESS_TRANSFER_WRITE_BIT,
596 VK_PIPELINE_STAGE_TRANSFER_BIT,
597 false);
598
599 src->msaaImage()->setImageLayout(this,
600 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
601 VK_ACCESS_TRANSFER_READ_BIT,
602 VK_PIPELINE_STAGE_TRANSFER_BIT,
603 false);
604
605 fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
606 }
607
onResolveRenderTarget(GrRenderTarget * target,const SkIRect & resolveRect,GrSurfaceOrigin resolveOrigin,ForExternalIO forExternalIO)608 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
609 GrSurfaceOrigin resolveOrigin, ForExternalIO forExternalIO) {
610 SkASSERT(target->numSamples() > 1);
611 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
612 SkASSERT(rt->msaaImage());
613
614 auto nativeResolveRect = GrNativeRect::MakeRelativeTo(
615 resolveOrigin, target->height(), resolveRect);
616 this->resolveImage(target, rt, nativeResolveRect.asSkIRect(),
617 SkIPoint::Make(nativeResolveRect.fX, nativeResolveRect.fY));
618
619 if (ForExternalIO::kYes == forExternalIO) {
620 // This resolve is called when we are preparing an msaa surface for external I/O. It is
621 // called after flushing, so we need to make sure we submit the command buffer after doing
622 // the resolve so that the resolve actually happens.
623 this->submitCommandBuffer(kSkip_SyncQueue);
624 }
625 }
626
uploadTexDataLinear(GrVkTexture * tex,int left,int top,int width,int height,GrColorType dataColorType,const void * data,size_t rowBytes)627 bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
628 GrColorType dataColorType, const void* data, size_t rowBytes) {
629 SkASSERT(data);
630 SkASSERT(tex->isLinearTiled());
631
632 SkDEBUGCODE(
633 SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
634 SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
635 SkASSERT(bounds.contains(subRect));
636 )
637 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
638 size_t trimRowBytes = width * bpp;
639
640 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
641 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
642 const VkImageSubresource subres = {
643 VK_IMAGE_ASPECT_COLOR_BIT,
644 0, // mipLevel
645 0, // arraySlice
646 };
647 VkSubresourceLayout layout;
648
649 const GrVkInterface* interface = this->vkInterface();
650
651 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
652 tex->image(),
653 &subres,
654 &layout));
655
656 const GrVkAlloc& alloc = tex->alloc();
657 if (VK_NULL_HANDLE == alloc.fMemory) {
658 return false;
659 }
660 VkDeviceSize offset = top * layout.rowPitch + left * bpp;
661 VkDeviceSize size = height*layout.rowPitch;
662 SkASSERT(size + offset <= alloc.fSize);
663 void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
664 if (!mapPtr) {
665 return false;
666 }
667 mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
668
669 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
670 height);
671
672 GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
673 GrVkMemory::UnmapAlloc(this, alloc);
674
675 return true;
676 }
677
uploadTexDataOptimal(GrVkTexture * tex,int left,int top,int width,int height,GrColorType dataColorType,const GrMipLevel texels[],int mipLevelCount)678 bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
679 GrColorType dataColorType, const GrMipLevel texels[],
680 int mipLevelCount) {
681 SkASSERT(!tex->isLinearTiled());
682 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
683 SkASSERT(1 == mipLevelCount ||
684 (0 == left && 0 == top && width == tex->width() && height == tex->height()));
685
686 // We assume that if the texture has mip levels, we either upload to all the levels or just the
687 // first.
688 SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));
689
690 if (width == 0 || height == 0) {
691 return false;
692 }
693
694 if (GrPixelConfigToColorType(tex->config()) != dataColorType) {
695 return false;
696 }
697
698 // For RGB_888x src data we are uploading it first to an RGBA texture and then copying it to the
699 // dst RGB texture. Thus we do not upload mip levels for that.
700 if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
701 SkASSERT(tex->config() == kRGB_888_GrPixelConfig);
702 // First check that we'll be able to do the copy to the to the R8G8B8 image in the end via a
703 // blit or draw.
704 if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM, tex->isLinearTiled()) &&
705 !this->vkCaps().isFormatRenderable(VK_FORMAT_R8G8B8_UNORM, 1)) {
706 return false;
707 }
708 mipLevelCount = 1;
709 }
710
711 SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
712 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
713
714 // texels is const.
715 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
716 // Because of this we need to make a non-const shallow copy of texels.
717 SkAutoTMalloc<GrMipLevel> texelsShallowCopy;
718
719 texelsShallowCopy.reset(mipLevelCount);
720 memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
721
722 SkTArray<size_t> individualMipOffsets(mipLevelCount);
723 individualMipOffsets.push_back(0);
724 size_t combinedBufferSize = width * bpp * height;
725 int currentWidth = width;
726 int currentHeight = height;
727 if (!texelsShallowCopy[0].fPixels) {
728 combinedBufferSize = 0;
729 }
730
731 // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
732 // config. This works with the assumption that the bytes in pixel config is always a power of 2.
733 SkASSERT((bpp & (bpp - 1)) == 0);
734 const size_t alignmentMask = 0x3 | (bpp - 1);
735 for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
736 currentWidth = SkTMax(1, currentWidth/2);
737 currentHeight = SkTMax(1, currentHeight/2);
738
739 if (texelsShallowCopy[currentMipLevel].fPixels) {
740 const size_t trimmedSize = currentWidth * bpp * currentHeight;
741 const size_t alignmentDiff = combinedBufferSize & alignmentMask;
742 if (alignmentDiff != 0) {
743 combinedBufferSize += alignmentMask - alignmentDiff + 1;
744 }
745 individualMipOffsets.push_back(combinedBufferSize);
746 combinedBufferSize += trimmedSize;
747 } else {
748 individualMipOffsets.push_back(0);
749 }
750 }
751 if (0 == combinedBufferSize) {
752 // We don't actually have any data to upload so just return success
753 return true;
754 }
755
756 // allocate buffer to hold our mip data
757 sk_sp<GrVkTransferBuffer> transferBuffer =
758 GrVkTransferBuffer::Make(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
759 if (!transferBuffer) {
760 return false;
761 }
762
763 int uploadLeft = left;
764 int uploadTop = top;
765 GrVkTexture* uploadTexture = tex;
766 // For uploading RGB_888x data to an R8G8B8_UNORM texture we must first upload the data to an
767 // R8G8B8A8_UNORM image and then copy it.
768 sk_sp<GrVkTexture> copyTexture;
769 if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
770 bool dstHasYcbcr = tex->ycbcrConversionInfo().isValid();
771 if (!this->vkCaps().canCopyAsBlit(tex->imageFormat(), 1, false, dstHasYcbcr,
772 VK_FORMAT_R8G8B8A8_UNORM, 1, false, false)) {
773 return false;
774 }
775 GrSurfaceDesc surfDesc;
776 surfDesc.fWidth = width;
777 surfDesc.fHeight = height;
778 surfDesc.fConfig = kRGBA_8888_GrPixelConfig;
779
780 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
781 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
782 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
783
784 GrVkImage::ImageDesc imageDesc;
785 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
786 imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
787 imageDesc.fWidth = width;
788 imageDesc.fHeight = height;
789 imageDesc.fLevels = 1;
790 imageDesc.fSamples = 1;
791 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
792 imageDesc.fUsageFlags = usageFlags;
793 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
794
795 copyTexture = GrVkTexture::MakeNewTexture(this, SkBudgeted::kYes, surfDesc, imageDesc,
796 GrMipMapsStatus::kNotAllocated);
797 if (!copyTexture) {
798 return false;
799 }
800
801 uploadTexture = copyTexture.get();
802 uploadLeft = 0;
803 uploadTop = 0;
804 }
805
806 char* buffer = (char*) transferBuffer->map();
807 SkTArray<VkBufferImageCopy> regions(mipLevelCount);
808
809 currentWidth = width;
810 currentHeight = height;
811 int layerHeight = uploadTexture->height();
812 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
813 if (texelsShallowCopy[currentMipLevel].fPixels) {
814 SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
815 const size_t trimRowBytes = currentWidth * bpp;
816 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
817
818 // copy data into the buffer, skipping the trailing bytes
819 char* dst = buffer + individualMipOffsets[currentMipLevel];
820 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
821 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
822
823 VkBufferImageCopy& region = regions.push_back();
824 memset(®ion, 0, sizeof(VkBufferImageCopy));
825 region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
826 region.bufferRowLength = currentWidth;
827 region.bufferImageHeight = currentHeight;
828 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
829 region.imageOffset = {uploadLeft, uploadTop, 0};
830 region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
831 }
832 currentWidth = SkTMax(1, currentWidth/2);
833 currentHeight = SkTMax(1, currentHeight/2);
834 layerHeight = currentHeight;
835 }
836
837 // no need to flush non-coherent memory, unmap will do that for us
838 transferBuffer->unmap();
839
840 // Change layout of our target so it can be copied to
841 uploadTexture->setImageLayout(this,
842 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
843 VK_ACCESS_TRANSFER_WRITE_BIT,
844 VK_PIPELINE_STAGE_TRANSFER_BIT,
845 false);
846
847 // Copy the buffer to the image
848 fCurrentCmdBuffer->copyBufferToImage(this,
849 transferBuffer.get(),
850 uploadTexture,
851 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
852 regions.count(),
853 regions.begin());
854
855 // If we copied the data into a temporary image first, copy that image into our main texture
856 // now.
857 if (copyTexture.get()) {
858 SkASSERT(dataColorType == GrColorType::kRGB_888x);
859 SkAssertResult(this->copySurface(tex, copyTexture.get(), SkIRect::MakeWH(width, height),
860 SkIPoint::Make(left, top)));
861 }
862 if (1 == mipLevelCount) {
863 tex->texturePriv().markMipMapsDirty();
864 }
865
866 return true;
867 }
868
869 // It's probably possible to roll this into uploadTexDataOptimal,
870 // but for now it's easier to maintain as a separate entity.
uploadTexDataCompressed(GrVkTexture * tex,int left,int top,int width,int height,SkImage::CompressionType compressionType,const void * data)871 bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
872 SkImage::CompressionType compressionType, const void* data) {
873 SkASSERT(data);
874 SkASSERT(!tex->isLinearTiled());
875 // For now the assumption is that our rect is the entire texture.
876 // Compressed textures are read-only so this should be a reasonable assumption.
877 SkASSERT(0 == left && 0 == top && width == tex->width() && height == tex->height());
878
879 if (width == 0 || height == 0) {
880 return false;
881 }
882
883 SkImage::CompressionType textureCompressionType;
884 if (!GrVkFormatToCompressionType(tex->imageFormat(), &textureCompressionType) ||
885 textureCompressionType != compressionType) {
886 return false;
887 }
888
889 SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
890
891 size_t dataSize = GrCompressedDataSize(compressionType, width, height);
892
893 // allocate buffer to hold our mip data
894 sk_sp<GrVkTransferBuffer> transferBuffer =
895 GrVkTransferBuffer::Make(this, dataSize, GrVkBuffer::kCopyRead_Type);
896 if (!transferBuffer) {
897 return false;
898 }
899
900 int uploadLeft = left;
901 int uploadTop = top;
902 GrVkTexture* uploadTexture = tex;
903
904 char* buffer = (char*)transferBuffer->map();
905
906 memcpy(buffer, data, dataSize);
907
908 VkBufferImageCopy region;
909 memset(®ion, 0, sizeof(VkBufferImageCopy));
910 region.bufferOffset = transferBuffer->offset();
911 region.bufferRowLength = width;
912 region.bufferImageHeight = height;
913 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
914 region.imageOffset = { uploadLeft, uploadTop, 0 };
915 region.imageExtent = { SkToU32(width), SkToU32(height), 1 };
916
917 // no need to flush non-coherent memory, unmap will do that for us
918 transferBuffer->unmap();
919
920 // Change layout of our target so it can be copied to
921 uploadTexture->setImageLayout(this,
922 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
923 VK_ACCESS_TRANSFER_WRITE_BIT,
924 VK_PIPELINE_STAGE_TRANSFER_BIT,
925 false);
926
927 // Copy the buffer to the image
928 fCurrentCmdBuffer->copyBufferToImage(this,
929 transferBuffer.get(),
930 uploadTexture,
931 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
932 1,
933 ®ion);
934
935 return true;
936 }
937
938 ////////////////////////////////////////////////////////////////////////////////
onCreateTexture(const GrSurfaceDesc & desc,const GrBackendFormat & format,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected isProtected,int mipLevelCount,uint32_t levelClearMask)939 sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc,
940 const GrBackendFormat& format,
941 GrRenderable renderable,
942 int renderTargetSampleCnt,
943 SkBudgeted budgeted,
944 GrProtected isProtected,
945 int mipLevelCount,
946 uint32_t levelClearMask) {
947 VkFormat pixelFormat;
948 SkAssertResult(format.asVkFormat(&pixelFormat));
949 SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
950
951 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
952 if (renderable == GrRenderable::kYes) {
953 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
954 }
955
956 // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
957 // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
958 // will be using this texture in some copy or not. Also this assumes, as is the current case,
959 // that all render targets in vulkan are also textures. If we change this practice of setting
960 // both bits, we must make sure to set the destination bit if we are uploading srcData to the
961 // texture.
962 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
963
964 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
965 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
966 // to 1.
967 SkASSERT(mipLevelCount > 0);
968 GrVkImage::ImageDesc imageDesc;
969 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
970 imageDesc.fFormat = pixelFormat;
971 imageDesc.fWidth = desc.fWidth;
972 imageDesc.fHeight = desc.fHeight;
973 imageDesc.fLevels = mipLevelCount;
974 imageDesc.fSamples = 1;
975 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
976 imageDesc.fUsageFlags = usageFlags;
977 imageDesc.fIsProtected = isProtected;
978
979 GrMipMapsStatus mipMapsStatus =
980 mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
981
982 sk_sp<GrVkTexture> tex;
983 if (renderable == GrRenderable::kYes) {
984 tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
985 this, budgeted, desc, renderTargetSampleCnt, imageDesc, mipMapsStatus);
986 } else {
987 tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc, mipMapsStatus);
988 }
989
990 if (!tex) {
991 return nullptr;
992 }
993
994 if (levelClearMask) {
995 SkSTArray<1, VkImageSubresourceRange> ranges;
996 bool inRange = false;
997 for (uint32_t i = 0; i < tex->mipLevels(); ++i) {
998 if (levelClearMask & (1U << i)) {
999 if (inRange) {
1000 ranges.back().levelCount++;
1001 } else {
1002 auto& range = ranges.push_back();
1003 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1004 range.baseArrayLayer = 0;
1005 range.baseMipLevel = i;
1006 range.layerCount = 1;
1007 range.levelCount = 1;
1008 inRange = true;
1009 }
1010 } else if (inRange) {
1011 inRange = false;
1012 }
1013 }
1014 SkASSERT(!ranges.empty());
1015 static constexpr VkClearColorValue kZeroClearColor = {};
1016 tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1017 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1018 this->currentCommandBuffer()->clearColorImage(this, tex.get(), &kZeroClearColor,
1019 ranges.count(), ranges.begin());
1020 }
1021 return tex;
1022 }
1023
onCreateCompressedTexture(int width,int height,const GrBackendFormat & format,SkImage::CompressionType compressionType,SkBudgeted budgeted,const void * data)1024 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(int width, int height,
1025 const GrBackendFormat& format,
1026 SkImage::CompressionType compressionType,
1027 SkBudgeted budgeted, const void* data) {
1028 VkFormat pixelFormat;
1029 if (!format.asVkFormat(&pixelFormat)) {
1030 return nullptr;
1031 }
1032
1033 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
1034
1035 // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
1036 // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
1037 // will be using this texture in some copy or not. Also this assumes, as is the current case,
1038 // that all render targets in vulkan are also textures. If we change this practice of setting
1039 // both bits, we must make sure to set the destination bit if we are uploading srcData to the
1040 // texture.
1041 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1042
1043 // Compressed textures with MIP levels or multiple samples are not supported as of now.
1044 GrVkImage::ImageDesc imageDesc;
1045 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1046 imageDesc.fFormat = pixelFormat;
1047 imageDesc.fWidth = width;
1048 imageDesc.fHeight = height;
1049 imageDesc.fLevels = 1;
1050 imageDesc.fSamples = 1;
1051 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1052 imageDesc.fUsageFlags = usageFlags;
1053 imageDesc.fIsProtected = GrProtected::kNo;
1054
1055 GrSurfaceDesc desc;
1056 desc.fConfig = GrCompressionTypePixelConfig(compressionType);
1057 desc.fWidth = width;
1058 desc.fHeight = height;
1059 auto tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc,
1060 GrMipMapsStatus::kNotAllocated);
1061 if (!tex) {
1062 return nullptr;
1063 }
1064
1065 if (!this->uploadTexDataCompressed(tex.get(), 0, 0, desc.fWidth, desc.fHeight, compressionType,
1066 data)) {
1067 return nullptr;
1068 }
1069
1070 return tex;
1071 }
1072
1073 ////////////////////////////////////////////////////////////////////////////////
1074
copyBuffer(GrVkBuffer * srcBuffer,GrVkBuffer * dstBuffer,VkDeviceSize srcOffset,VkDeviceSize dstOffset,VkDeviceSize size)1075 void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
1076 VkDeviceSize dstOffset, VkDeviceSize size) {
1077 VkBufferCopy copyRegion;
1078 copyRegion.srcOffset = srcOffset;
1079 copyRegion.dstOffset = dstOffset;
1080 copyRegion.size = size;
1081 fCurrentCmdBuffer->copyBuffer(this, srcBuffer, dstBuffer, 1, ©Region);
1082 }
1083
updateBuffer(GrVkBuffer * buffer,const void * src,VkDeviceSize offset,VkDeviceSize size)1084 bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
1085 VkDeviceSize offset, VkDeviceSize size) {
1086 // Update the buffer
1087 fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);
1088
1089 return true;
1090 }
1091
1092 ////////////////////////////////////////////////////////////////////////////////
1093
check_image_info(const GrVkCaps & caps,const GrVkImageInfo & info,GrColorType colorType,bool needsAllocation)1094 static bool check_image_info(const GrVkCaps& caps,
1095 const GrVkImageInfo& info,
1096 GrColorType colorType,
1097 bool needsAllocation) {
1098 if (VK_NULL_HANDLE == info.fImage) {
1099 return false;
1100 }
1101
1102 if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1103 return false;
1104 }
1105
1106 if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1107 return false;
1108 }
1109
1110 if (info.fYcbcrConversionInfo.isValid()) {
1111 if (!caps.supportsYcbcrConversion()) {
1112 return false;
1113 }
1114 if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1115 return true;
1116 }
1117 }
1118
1119 SkASSERT(GrVkFormatColorTypePairIsValid(info.fFormat, colorType));
1120 return true;
1121 }
1122
check_tex_image_info(const GrVkCaps & caps,const GrVkImageInfo & info)1123 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1124 if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1125 return true;
1126 }
1127 if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1128 if (!caps.isVkFormatTexturable(info.fFormat)) {
1129 return false;
1130 }
1131 } else {
1132 SkASSERT(info.fImageTiling == VK_IMAGE_TILING_LINEAR);
1133 if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1134 return false;
1135 }
1136 }
1137 return true;
1138 }
1139
check_rt_image_info(const GrVkCaps & caps,const GrVkImageInfo & info,int sampleCnt)1140 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, int sampleCnt) {
1141 if (!caps.isFormatRenderable(info.fFormat, sampleCnt)) {
1142 return false;
1143 }
1144 return true;
1145 }
1146
onWrapBackendTexture(const GrBackendTexture & backendTex,GrColorType colorType,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)1147 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1148 GrColorType colorType, GrWrapOwnership ownership,
1149 GrWrapCacheable cacheable, GrIOType ioType) {
1150 GrVkImageInfo imageInfo;
1151 if (!backendTex.getVkImageInfo(&imageInfo)) {
1152 return nullptr;
1153 }
1154
1155 if (!check_image_info(this->vkCaps(), imageInfo, colorType,
1156 kAdopt_GrWrapOwnership == ownership)) {
1157 return nullptr;
1158 }
1159 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1160 return nullptr;
1161 }
1162
1163 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1164 return nullptr;
1165 }
1166
1167 GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
1168 colorType);
1169 SkASSERT(kUnknown_GrPixelConfig != config);
1170
1171 GrSurfaceDesc surfDesc;
1172 surfDesc.fWidth = backendTex.width();
1173 surfDesc.fHeight = backendTex.height();
1174 surfDesc.fConfig = config;
1175
1176 sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
1177 SkASSERT(layout);
1178 return GrVkTexture::MakeWrappedTexture(this, surfDesc, ownership, cacheable, ioType, imageInfo,
1179 std::move(layout));
1180 }
1181
onWrapRenderableBackendTexture(const GrBackendTexture & backendTex,int sampleCnt,GrColorType colorType,GrWrapOwnership ownership,GrWrapCacheable cacheable)1182 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1183 int sampleCnt,
1184 GrColorType colorType,
1185 GrWrapOwnership ownership,
1186 GrWrapCacheable cacheable) {
1187 GrVkImageInfo imageInfo;
1188 if (!backendTex.getVkImageInfo(&imageInfo)) {
1189 return nullptr;
1190 }
1191
1192 if (!check_image_info(this->vkCaps(), imageInfo, colorType,
1193 kAdopt_GrWrapOwnership == ownership)) {
1194 return nullptr;
1195 }
1196 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1197 return nullptr;
1198 }
1199 if (!check_rt_image_info(this->vkCaps(), imageInfo, sampleCnt)) {
1200 return nullptr;
1201 }
1202
1203 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1204 return nullptr;
1205 }
1206
1207
1208 GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
1209 colorType);
1210 SkASSERT(kUnknown_GrPixelConfig != config);
1211
1212 GrSurfaceDesc surfDesc;
1213 surfDesc.fWidth = backendTex.width();
1214 surfDesc.fHeight = backendTex.height();
1215 surfDesc.fConfig = config;
1216 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1217
1218 sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
1219 SkASSERT(layout);
1220
1221 return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
1222 this, surfDesc, sampleCnt, ownership, cacheable, imageInfo, std::move(layout));
1223 }
1224
onWrapBackendRenderTarget(const GrBackendRenderTarget & backendRT,GrColorType colorType)1225 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
1226 GrColorType colorType) {
1227 // Currently the Vulkan backend does not support wrapping of msaa render targets directly. In
1228 // general this is not an issue since swapchain images in vulkan are never multisampled. Thus if
1229 // you want a multisampled RT it is best to wrap the swapchain images and then let Skia handle
1230 // creating and owning the MSAA images.
1231 if (backendRT.sampleCnt() > 1) {
1232 return nullptr;
1233 }
1234
1235 GrVkImageInfo info;
1236 if (!backendRT.getVkImageInfo(&info)) {
1237 return nullptr;
1238 }
1239
1240 GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendRT.getBackendFormat(),
1241 colorType);
1242 SkASSERT(kUnknown_GrPixelConfig != config);
1243
1244 if (!check_image_info(this->vkCaps(), info, colorType, false)) {
1245 return nullptr;
1246 }
1247 if (!check_rt_image_info(this->vkCaps(), info, backendRT.sampleCnt())) {
1248 return nullptr;
1249 }
1250
1251 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1252 return nullptr;
1253 }
1254
1255 GrSurfaceDesc desc;
1256 desc.fWidth = backendRT.width();
1257 desc.fHeight = backendRT.height();
1258 desc.fConfig = config;
1259
1260 sk_sp<GrVkImageLayout> layout = backendRT.getGrVkImageLayout();
1261
1262 sk_sp<GrVkRenderTarget> tgt =
1263 GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, 1, info, std::move(layout));
1264
1265 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1266 SkASSERT(!backendRT.stencilBits());
1267 if (tgt) {
1268 SkASSERT(tgt->canAttemptStencilAttachment());
1269 }
1270
1271 return tgt;
1272 }
1273
onWrapBackendTextureAsRenderTarget(const GrBackendTexture & tex,int sampleCnt,GrColorType grColorType)1274 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
1275 int sampleCnt,
1276 GrColorType grColorType) {
1277
1278 GrVkImageInfo imageInfo;
1279 if (!tex.getVkImageInfo(&imageInfo)) {
1280 return nullptr;
1281 }
1282 if (!check_image_info(this->vkCaps(), imageInfo, grColorType, false)) {
1283 return nullptr;
1284 }
1285 if (!check_rt_image_info(this->vkCaps(), imageInfo, sampleCnt)) {
1286 return nullptr;
1287 }
1288
1289 if (tex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1290 return nullptr;
1291 }
1292
1293 GrPixelConfig config = this->caps()->getConfigFromBackendFormat(tex.getBackendFormat(),
1294 grColorType);
1295 SkASSERT(kUnknown_GrPixelConfig != config);
1296
1297 GrSurfaceDesc desc;
1298 desc.fWidth = tex.width();
1299 desc.fHeight = tex.height();
1300 desc.fConfig = config;
1301
1302 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1303 if (!sampleCnt) {
1304 return nullptr;
1305 }
1306
1307 sk_sp<GrVkImageLayout> layout = tex.getGrVkImageLayout();
1308 SkASSERT(layout);
1309
1310 return GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, sampleCnt, imageInfo,
1311 std::move(layout));
1312 }
1313
onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo & imageInfo,const GrVkDrawableInfo & vkInfo)1314 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1315 const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1316 int maxSize = this->caps()->maxTextureSize();
1317 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1318 return nullptr;
1319 }
1320
1321 GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
1322 if (!backendFormat.isValid()) {
1323 return nullptr;
1324 }
1325 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1326 if (!sampleCnt) {
1327 return nullptr;
1328 }
1329
1330 GrColorType grColorType = SkColorTypeToGrColorType(imageInfo.colorType());
1331 GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendFormat, grColorType);
1332 if (config == kUnknown_GrPixelConfig) {
1333 return nullptr;
1334 }
1335
1336 GrSurfaceDesc desc;
1337 desc.fWidth = imageInfo.width();
1338 desc.fHeight = imageInfo.height();
1339 desc.fConfig = config;
1340
1341 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, desc, vkInfo);
1342 }
1343
onRegenerateMipMapLevels(GrTexture * tex)1344 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1345 auto* vkTex = static_cast<GrVkTexture*>(tex);
1346 // don't do anything for linearly tiled textures (can't have mipmaps)
1347 if (vkTex->isLinearTiled()) {
1348 SkDebugf("Trying to create mipmap for linear tiled texture");
1349 return false;
1350 }
1351 SkASSERT(tex->texturePriv().textureType() == GrTextureType::k2D);
1352
1353 // determine if we can blit to and from this format
1354 const GrVkCaps& caps = this->vkCaps();
1355 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1356 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1357 !caps.mipMapSupport()) {
1358 return false;
1359 }
1360
1361 int width = tex->width();
1362 int height = tex->height();
1363 VkImageBlit blitRegion;
1364 memset(&blitRegion, 0, sizeof(VkImageBlit));
1365
1366 // SkMipMap doesn't include the base level in the level count so we have to add 1
1367 uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1368 SkASSERT(levelCount == vkTex->mipLevels());
1369
1370 // change layout of the layers so we can write to them.
1371 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1372 VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1373
1374 // setup memory barrier
1375 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1376 VkImageMemoryBarrier imageMemoryBarrier = {
1377 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1378 nullptr, // pNext
1379 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1380 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1381 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
1382 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1383 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1384 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1385 vkTex->image(), // image
1386 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1387 };
1388
1389 // Blit the miplevels
1390 uint32_t mipLevel = 1;
1391 while (mipLevel < levelCount) {
1392 int prevWidth = width;
1393 int prevHeight = height;
1394 width = SkTMax(1, width / 2);
1395 height = SkTMax(1, height / 2);
1396
1397 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1398 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1399 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1400
1401 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1402 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1403 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1404 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1405 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1406 blitRegion.dstOffsets[1] = { width, height, 1 };
1407 fCurrentCmdBuffer->blitImage(this,
1408 vkTex->resource(),
1409 vkTex->image(),
1410 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1411 vkTex->resource(),
1412 vkTex->image(),
1413 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1414 1,
1415 &blitRegion,
1416 VK_FILTER_LINEAR);
1417 ++mipLevel;
1418 }
1419 if (levelCount > 1) {
1420 // This barrier logically is not needed, but it changes the final level to the same layout
1421 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1422 // layouts and future layout changes easier. The alternative here would be to track layout
1423 // and memory accesses per layer which doesn't seem work it.
1424 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1425 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1426 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1427 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1428 }
1429 return true;
1430 }
1431
1432 ////////////////////////////////////////////////////////////////////////////////
1433
createStencilAttachmentForRenderTarget(const GrRenderTarget * rt,int width,int height,int numStencilSamples)1434 GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(
1435 const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
1436 SkASSERT(numStencilSamples == rt->numSamples());
1437 SkASSERT(width >= rt->width());
1438 SkASSERT(height >= rt->height());
1439
1440 int samples = rt->numSamples();
1441
1442 const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferredStencilFormat();
1443
1444 GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
1445 width,
1446 height,
1447 samples,
1448 sFmt));
1449 fStats.incStencilAttachmentCreates();
1450 return stencil;
1451 }
1452
1453 ////////////////////////////////////////////////////////////////////////////////
1454
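// Helper used below: copies each mip level from srcData into the mapped staging allocation,
// tightly packed at the offsets in individualMipOffsets, then flushes and unmaps the allocation.
// Returns false if the allocation cannot be mapped.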
1455 bool copy_src_data(GrVkGpu* gpu, const GrVkAlloc& alloc, VkFormat vkFormat,
1456 const SkTArray<size_t>& individualMipOffsets,
1457 const SkPixmap srcData[], int numMipLevels) {
1458 SkASSERT(srcData && numMipLevels);
1459 SkASSERT(!GrVkFormatIsCompressed(vkFormat));
1460 SkASSERT(individualMipOffsets.count() == numMipLevels);
1461
1462 char* mapPtr = (char*) GrVkMemory::MapAlloc(gpu, alloc);
1463 if (!mapPtr) {
1464 return false;
1465 }
1466 size_t bytesPerPixel = gpu->vkCaps().bytesPerPixel(vkFormat);
1467
1468 for (int level = 0; level < numMipLevels; ++level) {
1469 const size_t trimRB = srcData[level].width() * bytesPerPixel;
1470 SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= alloc.fSize);
1471
1472 SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1473 srcData[level].addr(), srcData[level].rowBytes(),
1474 trimRB, srcData[level].height());
1475 }
1476
1477 GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, alloc.fSize);
1478 GrVkMemory::UnmapAlloc(gpu, alloc);
1479 return true;
1480 }
1481
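// Helper used below: records a pipeline barrier on cmdBuffer that transitions the image described
// by info from its current layout to newLayout (color aspect, mipLevels levels), then updates
// info->fImageLayout to the new layout.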
1482 static void set_image_layout(const GrVkInterface* vkInterface, VkCommandBuffer cmdBuffer,
1483 GrVkImageInfo* info, VkImageLayout newLayout, uint32_t mipLevels,
1484 VkAccessFlags dstAccessMask, VkPipelineStageFlagBits dstStageMask) {
1485 VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(info->fImageLayout);
1486 VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(
1487 info->fImageLayout);
1488
1489 VkImageMemoryBarrier barrier;
1490 memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
1491 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1492 barrier.pNext = nullptr;
1493 barrier.srcAccessMask = srcAccessMask;
1494 barrier.dstAccessMask = dstAccessMask;
1495 barrier.oldLayout = info->fImageLayout;
1496 barrier.newLayout = newLayout;
1497 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1498 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1499 barrier.image = info->fImage;
1500 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
1501 GR_VK_CALL(vkInterface, CmdPipelineBarrier(
1502 cmdBuffer,
1503 srcStageMask,
1504 dstStageMask,
1505 0,
1506 0, nullptr,
1507 0, nullptr,
1508 1, &barrier));
1509 info->fImageLayout = newLayout;
1510 }
1511
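// Creates a VkImage and its backing memory for wrapping in a GrBackendTexture or
// GrBackendRenderTarget. If srcData or a clear color is supplied, the contents are uploaded or
// cleared using a one-time command buffer that is submitted and waited on with a fence before
// returning, leaving the image in a layout appropriate for how Ganesh will borrow it.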
1512 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat, int w, int h, bool texturable,
1513 bool renderable, GrMipMapped mipMapped,
1514 const SkPixmap srcData[], int numMipLevels,
1515 const SkColor4f* color, GrVkImageInfo* info,
1516 GrProtected isProtected) {
1517 SkASSERT(texturable || renderable);
1518 if (!texturable) {
1519 SkASSERT(GrMipMapped::kNo == mipMapped);
1520 SkASSERT(!srcData && !numMipLevels);
1521 }
1522
1523 // Compressed formats go through onCreateCompressedBackendTexture
1524 SkASSERT(!GrVkFormatIsCompressed(vkFormat));
1525
1526 if (fProtectedContext != isProtected) {
1527 return false;
1528 }
1529
1530 if (texturable && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1531 return false;
1532 }
1533
1534 if (renderable && !fVkCaps->isFormatRenderable(vkFormat, 1)) {
1535 return false;
1536 }
1537
1538 VkImageUsageFlags usageFlags = 0;
1539 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1540 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1541 if (texturable) {
1542 usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1543 }
1544 if (renderable) {
1545 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1546 }
1547
1548 // Figure out the number of mip levels.
1549 uint32_t mipLevelCount = 1;
1550 if (srcData) {
1551 SkASSERT(numMipLevels > 0);
1552 mipLevelCount = numMipLevels;
1553 } else if (GrMipMapped::kYes == mipMapped) {
1554 mipLevelCount = SkMipMap::ComputeLevelCount(w, h) + 1;
1555 }
1556
1557 GrVkImage::ImageDesc imageDesc;
1558 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1559 imageDesc.fFormat = vkFormat;
1560 imageDesc.fWidth = w;
1561 imageDesc.fHeight = h;
1562 imageDesc.fLevels = mipLevelCount;
1563 imageDesc.fSamples = 1;
1564 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1565 imageDesc.fUsageFlags = usageFlags;
1566 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1567 imageDesc.fIsProtected = fProtectedContext;
1568
1569 if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1570 SkDebugf("Failed to init image info\n");
1571 return false;
1572 }
1573
1574 if (!srcData && !color) {
1575 return true;
1576 }
1577
1578 // We need to declare these early so that we can delete them at the end outside of
1579 // the if block.
1580 GrVkAlloc bufferAlloc;
1581 VkBuffer buffer = VK_NULL_HANDLE;
1582
1583 VkResult err;
1584 const VkCommandBufferAllocateInfo cmdInfo = {
1585 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1586 nullptr, // pNext
1587 fCmdPool->vkCommandPool(), // commandPool
1588 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1589 1 // bufferCount
1590 };
1591
1592 VkCommandBuffer cmdBuffer;
1593 err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
1594 if (err) {
1595 GrVkImage::DestroyImageInfo(this, info);
1596 return false;
1597 }
1598
1599 VkCommandBufferBeginInfo cmdBufferBeginInfo;
1600 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1601 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1602 cmdBufferBeginInfo.pNext = nullptr;
1603 cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
1604 cmdBufferBeginInfo.pInheritanceInfo = nullptr;
1605
1606 err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
1607 SkASSERT(!err);
1608
1609 // Set image layout and add barrier
1610 set_image_layout(this->vkInterface(), cmdBuffer, info, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1611 mipLevelCount, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
1612
1613 if (srcData) {
1614 size_t bytesPerPixel = fVkCaps->bytesPerPixel(vkFormat);
1615 SkASSERT(w && h);
1616
1617 SkTArray<size_t> individualMipOffsets(mipLevelCount);
1618
1619 size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerPixel, w, h,
1620 &individualMipOffsets,
1621 mipLevelCount);
1622
1623 VkBufferCreateInfo bufInfo;
1624 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
1625 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1626 bufInfo.flags = fProtectedContext == GrProtected::kYes ? VK_BUFFER_CREATE_PROTECTED_BIT : 0;
1627 bufInfo.size = combinedBufferSize;
1628 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1629 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1630 bufInfo.queueFamilyIndexCount = 0;
1631 bufInfo.pQueueFamilyIndices = nullptr;
1632 err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
1633
1634 if (err) {
1635 GrVkImage::DestroyImageInfo(this, info);
1636 VK_CALL(EndCommandBuffer(cmdBuffer));
1637 VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
1638 return false;
1639 }
1640
1641 if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type, true,
1642 &bufferAlloc)) {
1643 GrVkImage::DestroyImageInfo(this, info);
1644 VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
1645 VK_CALL(EndCommandBuffer(cmdBuffer));
1646 VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
1647 return false;
1648 }
1649
1650 bool result = copy_src_data(this, bufferAlloc, vkFormat, individualMipOffsets,
1651 srcData, numMipLevels);
1652 if (!result) {
1653 GrVkImage::DestroyImageInfo(this, info);
1654 GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
1655 VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
1656 VK_CALL(EndCommandBuffer(cmdBuffer));
1657 VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
1658 return false;
1659 }
1660
1661 SkTArray<VkBufferImageCopy> regions(mipLevelCount);
1662
1663 int currentWidth = w;
1664 int currentHeight = h;
1665 for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1666 // Set up the copy region for this mip level; the copy itself is recorded after the loop.
1667 VkBufferImageCopy& region = regions.push_back();
1668 memset(&region, 0, sizeof(VkBufferImageCopy));
1669 region.bufferOffset = individualMipOffsets[currentMipLevel];
1670 region.bufferRowLength = currentWidth;
1671 region.bufferImageHeight = currentHeight;
1672 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1};
1673 region.imageOffset = {0, 0, 0};
1674 region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
1675 currentWidth = SkTMax(1, currentWidth / 2);
1676 currentHeight = SkTMax(1, currentHeight / 2);
1677 }
1678
1679 VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, info->fImage, info->fImageLayout,
1680 regions.count(), regions.begin()));
1681 } else {
1682 SkASSERT(color);
1683 VkClearColorValue vkColor;
1684 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1685 // uint32 union members in those cases.
1686 vkColor.float32[0] = color->fR;
1687 vkColor.float32[1] = color->fG;
1688 vkColor.float32[2] = color->fB;
1689 vkColor.float32[3] = color->fA;
1690 VkImageSubresourceRange range;
1691 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1692 range.baseArrayLayer = 0;
1693 range.baseMipLevel = 0;
1694 range.layerCount = 1;
1695 range.levelCount = mipLevelCount;
1696 VK_CALL(CmdClearColorImage(cmdBuffer, info->fImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1697 &vkColor, 1, &range));
1698 }
1699
1700 if (!srcData && renderable) {
1701 SkASSERT(color);
1702
1703 // Change image layout to color-attachment-optimal since if we use this texture as a
1704 // borrowed texture within Ganesh we are probably going to render to it
1705 set_image_layout(this->vkInterface(), cmdBuffer, info,
1706 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, mipLevelCount,
1707 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
1708 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1709 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
1710 } else if (texturable) {
1711 // Change image layout to shader read since if we use this texture as a borrowed
1712 // texture within Ganesh we require that its layout be set to that
1713 set_image_layout(this->vkInterface(), cmdBuffer, info,
1714 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, mipLevelCount,
1715 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
1716 }
1717
1718 // End CommandBuffer
1719 err = VK_CALL(EndCommandBuffer(cmdBuffer));
1720 SkASSERT(!err);
1721
1722 // Create Fence for queue
1723 VkFenceCreateInfo fenceInfo;
1724 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
1725 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
1726 fenceInfo.pNext = nullptr;
1727 fenceInfo.flags = 0;
1728 VkFence fence = VK_NULL_HANDLE;
1729
1730 err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
1731 SkASSERT(!err);
1732
1733 VkProtectedSubmitInfo protectedSubmitInfo;
1734 if (fProtectedContext == GrProtected::kYes) {
1735 memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
1736 protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
1737 protectedSubmitInfo.pNext = nullptr;
1738 protectedSubmitInfo.protectedSubmit = VK_TRUE;
1739 }
1740
1741 VkSubmitInfo submitInfo;
1742 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1743 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1744 submitInfo.pNext = fProtectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
1745 submitInfo.waitSemaphoreCount = 0;
1746 submitInfo.pWaitSemaphores = nullptr;
1747 submitInfo.pWaitDstStageMask = 0;
1748 submitInfo.commandBufferCount = 1;
1749 submitInfo.pCommandBuffers = &cmdBuffer;
1750 submitInfo.signalSemaphoreCount = 0;
1751 submitInfo.pSignalSemaphores = nullptr;
1752 err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
1753 SkASSERT(!err);
1754
1755 err = VK_CALL(WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
1756 if (VK_TIMEOUT == err) {
1757 GrVkImage::DestroyImageInfo(this, info);
1758 if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
1759 GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
1760 VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
1761 }
1762 VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
1763 VK_CALL(DestroyFence(this->device(), fence, nullptr));
1764 SkDebugf("Fence failed to signal: %d\n", err);
1765 SK_ABORT("failing");
1766 }
1767 SkASSERT(!err);
1768
1769 // Clean up transfer resources
1770 if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
1771 GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
1772 VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
1773 }
1774 VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
1775 VK_CALL(DestroyFence(this->device(), fence, nullptr));
1776
1777 return true;
1778 }
1779
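// Called from GrGpu::createBackendTexture (see the asserts below). Validates the requested format
// against the Vulkan caps and then defers image creation and initial data upload to
// createVkImageForBackendSurface above.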
1780 GrBackendTexture GrVkGpu::onCreateBackendTexture(int w, int h,
1781 const GrBackendFormat& format,
1782 GrMipMapped mipMapped,
1783 GrRenderable renderable,
1784 const SkPixmap srcData[], int numMipLevels,
1785 const SkColor4f* color, GrProtected isProtected) {
1786 this->handleDirtyContext();
1787
1788 const GrVkCaps& caps = this->vkCaps();
1789
1790 // GrGpu::createBackendTexture should've ensured these conditions
1791 SkASSERT(w >= 1 && w <= caps.maxTextureSize() && h >= 1 && h <= caps.maxTextureSize());
1792 SkASSERT(GrGpu::MipMapsAreCorrect(w, h, mipMapped, srcData, numMipLevels));
1793 SkASSERT(mipMapped == GrMipMapped::kNo || caps.mipMapSupport());
1794
1795 if (fProtectedContext != isProtected) {
1796 return GrBackendTexture();
1797 }
1798
1799 VkFormat vkFormat;
1800 if (!format.asVkFormat(&vkFormat)) {
1801 SkDebugf("Could not get VkFormat\n");
1802 return GrBackendTexture();
1803 }
1804
1805 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1806 if (!caps.isVkFormatTexturable(vkFormat)) {
1807 SkDebugf("Format is not texturable\n");
1808 return GrBackendTexture();
1809 }
1810
1811 if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
1812 SkDebugf("Can't create BackendTexture that requires Ycbcr sampler.\n");
1813 return GrBackendTexture();
1814 }
1815
1816 GrVkImageInfo info;
1817 if (!this->createVkImageForBackendSurface(vkFormat, w, h, true,
1818 GrRenderable::kYes == renderable, mipMapped,
1819 srcData, numMipLevels, color, &info, isProtected)) {
1820 SkDebugf("Failed to create VkImage for backend texture\n");
1821 return GrBackendTexture();
1822 }
1823
1824 return GrBackendTexture(w, h, info);
1825 }
1826
1827 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
1828 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1829
1830 GrVkImageInfo info;
1831 if (tex.getVkImageInfo(&info)) {
1832 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1833 }
1834 }
1835
1836 #if GR_TEST_UTILS
1837 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
1838 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1839
1840 GrVkImageInfo backend;
1841 if (!tex.getVkImageInfo(&backend)) {
1842 return false;
1843 }
1844
1845 if (backend.fImage && backend.fAlloc.fMemory) {
1846 VkMemoryRequirements req;
1847 memset(&req, 0, sizeof(req));
1848 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
1849 backend.fImage,
1850 &req));
1851 // TODO: find a better check
1852 // This will probably fail with a different driver
1853 return (req.size > 0) && (req.size <= 8192 * 8192);
1854 }
1855
1856 return false;
1857 }
1858
1859 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(int w, int h, GrColorType ct) {
1860 this->handleDirtyContext();
1861
1862 if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
1863 return GrBackendRenderTarget();
1864 }
1865
1866 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
1867
1868 GrVkImageInfo info;
1869 if (!this->createVkImageForBackendSurface(vkFormat, w, h, false, true, GrMipMapped::kNo,
1870 nullptr, 0, &SkColors::kTransparent, &info,
1871 GrProtected::kNo)) {
1872 return {};
1873 }
1874
1875 return GrBackendRenderTarget(w, h, 1, 0, info);
1876 }
1877
1878 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
1879 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
1880
1881 GrVkImageInfo info;
1882 if (rt.getVkImageInfo(&info)) {
1883 // something in the command buffer may still be using this, so force submit
1884 this->submitCommandBuffer(kForce_SyncQueue);
1885 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1886 }
1887 }
1888
1889 void GrVkGpu::testingOnly_flushGpuAndSync() {
1890 this->submitCommandBuffer(kForce_SyncQueue);
1891 }
1892 #endif
1893
1894 ////////////////////////////////////////////////////////////////////////////////
1895
1896 void GrVkGpu::addBufferMemoryBarrier(const GrVkResource* resource,
1897 VkPipelineStageFlags srcStageMask,
1898 VkPipelineStageFlags dstStageMask,
1899 bool byRegion,
1900 VkBufferMemoryBarrier* barrier) const {
1901 SkASSERT(fCurrentCmdBuffer);
1902 SkASSERT(resource);
1903 fCurrentCmdBuffer->pipelineBarrier(this,
1904 resource,
1905 srcStageMask,
1906 dstStageMask,
1907 byRegion,
1908 GrVkCommandBuffer::kBufferMemory_BarrierType,
1909 barrier);
1910 }
1911
1912 void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
1913 VkPipelineStageFlags srcStageMask,
1914 VkPipelineStageFlags dstStageMask,
1915 bool byRegion,
1916 VkImageMemoryBarrier* barrier) const {
1917 SkASSERT(fCurrentCmdBuffer);
1918 SkASSERT(resource);
1919 fCurrentCmdBuffer->pipelineBarrier(this,
1920 resource,
1921 srcStageMask,
1922 dstStageMask,
1923 byRegion,
1924 GrVkCommandBuffer::kImageMemory_BarrierType,
1925 barrier);
1926 }
1927
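// Called at the end of a flush. When the surfaces are being presented, each proxy's image is
// prepared for presentation; images and surfaces named in externalRequests are prepared for
// external use. Finally the current command buffer is submitted, synchronously when
// kSyncCpu_GrFlushFlag is set.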
1928 void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
1929 SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
1930 const GrPrepareForExternalIORequests& externalRequests) {
1931 SkASSERT(n >= 0);
1932 SkASSERT(!n || proxies);
1933 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
1934 // not affect what we do here.
1935 if (n && access == SkSurface::BackendSurfaceAccess::kPresent) {
1936 GrVkImage* image;
1937 for (int i = 0; i < n; ++i) {
1938 SkASSERT(proxies[i]->isInstantiated());
1939 if (GrTexture* tex = proxies[i]->peekTexture()) {
1940 image = static_cast<GrVkTexture*>(tex);
1941 } else {
1942 GrRenderTarget* rt = proxies[i]->peekRenderTarget();
1943 SkASSERT(rt);
1944 image = static_cast<GrVkRenderTarget*>(rt);
1945 }
1946 image->prepareForPresent(this);
1947 }
1948 }
1949
1950 // Handle requests for preparing for external IO
1951 for (int i = 0; i < externalRequests.fNumImages; ++i) {
1952 SkImage* image = externalRequests.fImages[i];
1953 if (!image->isTextureBacked()) {
1954 continue;
1955 }
1956 SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image));
1957 sk_sp<GrTextureProxy> proxy = gpuImage->asTextureProxyRef(this->getContext());
1958 SkASSERT(proxy);
1959
1960 if (!proxy->isInstantiated()) {
1961 auto resourceProvider = this->getContext()->priv().resourceProvider();
1962 if (!proxy->instantiate(resourceProvider)) {
1963 continue;
1964 }
1965 }
1966
1967 GrTexture* tex = proxy->peekTexture();
1968 if (!tex) {
1969 continue;
1970 }
1971 GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
1972 vkTex->prepareForExternal(this);
1973 }
1974 for (int i = 0; i < externalRequests.fNumSurfaces; ++i) {
1975 SkSurface* surface = externalRequests.fSurfaces[i];
1976 if (!surface->getCanvas()->getGrContext()) {
1977 continue;
1978 }
1979 SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface);
1980 auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
1981 sk_sp<GrRenderTargetProxy> proxy = rtc->asRenderTargetProxyRef();
1982 if (!proxy->isInstantiated()) {
1983 auto resourceProvider = this->getContext()->priv().resourceProvider();
1984 if (!proxy->instantiate(resourceProvider)) {
1985 continue;
1986 }
1987 }
1988
1989 GrRenderTarget* rt = proxy->peekRenderTarget();
1990 SkASSERT(rt);
1991 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1992 if (externalRequests.fPrepareSurfaceForPresent &&
1993 externalRequests.fPrepareSurfaceForPresent[i]) {
1994 vkRT->prepareForPresent(this);
1995 } else {
1996 vkRT->prepareForExternal(this);
1997 }
1998 }
1999
2000 if (info.fFlags & kSyncCpu_GrFlushFlag) {
2001 this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc, info.fFinishedContext);
2002 } else {
2003 this->submitCommandBuffer(kSkip_SyncQueue, info.fFinishedProc, info.fFinishedContext);
2004 }
2005 }
2006
2007 static int get_surface_sample_cnt(GrSurface* surf) {
2008 if (const GrRenderTarget* rt = surf->asRenderTarget()) {
2009 return rt->numSamples();
2010 }
2011 return 0;
2012 }
2013
2014 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
2015 GrVkImage* srcImage, const SkIRect& srcRect,
2016 const SkIPoint& dstPoint) {
2017 #ifdef SK_DEBUG
2018 int dstSampleCnt = get_surface_sample_cnt(dst);
2019 int srcSampleCnt = get_surface_sample_cnt(src);
2020 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2021 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2022 VkFormat dstFormat = dstImage->imageFormat();
2023 VkFormat srcFormat;
2024 SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2025 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2026 srcFormat, srcSampleCnt, srcHasYcbcr));
2027 #endif
2028 if (src->isProtected() && !dst->isProtected()) {
2029 SkDebugf("Can't copy from protected memory to non-protected");
2030 return;
2031 }
2032
2033 // These access flags are for flushing/invalidating caches. For the dst image it doesn't matter
2034 // if the cache is flushed since it is only being written to.
2035 dstImage->setImageLayout(this,
2036 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2037 VK_ACCESS_TRANSFER_WRITE_BIT,
2038 VK_PIPELINE_STAGE_TRANSFER_BIT,
2039 false);
2040
2041 srcImage->setImageLayout(this,
2042 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2043 VK_ACCESS_TRANSFER_READ_BIT,
2044 VK_PIPELINE_STAGE_TRANSFER_BIT,
2045 false);
2046
2047 VkImageCopy copyRegion;
2048 memset(&copyRegion, 0, sizeof(VkImageCopy));
2049 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2050 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2051 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2052 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2053 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2054
2055 fCurrentCmdBuffer->copyImage(this,
2056 srcImage,
2057 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2058 dstImage,
2059 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2060 1,
2061 &copyRegion);
2062
2063 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2064 srcRect.width(), srcRect.height());
2065 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2066 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2067 }
2068
2069 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
2070 GrVkImage* srcImage, const SkIRect& srcRect,
2071 const SkIPoint& dstPoint) {
2072 #ifdef SK_DEBUG
2073 int dstSampleCnt = get_surface_sample_cnt(dst);
2074 int srcSampleCnt = get_surface_sample_cnt(src);
2075 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2076 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2077 VkFormat dstFormat = dstImage->imageFormat();
2078 VkFormat srcFormat;
2079 SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2080 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
2081 dstHasYcbcr, srcFormat, srcSampleCnt,
2082 srcImage->isLinearTiled(), srcHasYcbcr));
2083
2084 #endif
2085 if (src->isProtected() && !dst->isProtected()) {
2086 SkDebugf("Can't copy from protected memory to non-protected");
2087 return;
2088 }
2089
2090 dstImage->setImageLayout(this,
2091 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2092 VK_ACCESS_TRANSFER_WRITE_BIT,
2093 VK_PIPELINE_STAGE_TRANSFER_BIT,
2094 false);
2095
2096 srcImage->setImageLayout(this,
2097 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2098 VK_ACCESS_TRANSFER_READ_BIT,
2099 VK_PIPELINE_STAGE_TRANSFER_BIT,
2100 false);
2101
2102 // Compute the dst rect; the copy never scales, so it has the same dimensions as srcRect.
2103 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
2104 srcRect.height());
2105
2106 VkImageBlit blitRegion;
2107 memset(&blitRegion, 0, sizeof(VkImageBlit));
2108 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2109 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2110 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2111 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2112 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2113 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2114
2115 fCurrentCmdBuffer->blitImage(this,
2116 *srcImage,
2117 *dstImage,
2118 1,
2119 &blitRegion,
2120 VK_FILTER_NEAREST); // We never scale so any filter works here
2121
2122 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2123 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2124 }
2125
2126 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2127 const SkIPoint& dstPoint) {
2128 if (src->isProtected() && !dst->isProtected()) {
2129 SkDebugf("Can't copy from protected memory to non-protected");
2130 return;
2131 }
2132 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2133 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2134 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2135 srcRect.width(), srcRect.height());
2136 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2137 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2138 }
2139
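// Tries the available copy paths in order: MSAA resolve, vkCmdCopyImage, then vkCmdBlitImage.
// Returns false if the src is protected while the dst is not, if the dst wraps a secondary
// command buffer, or if none of the copy paths supports the given formats and sample counts.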
2140 bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2141 const SkIPoint& dstPoint) {
2142 #ifdef SK_DEBUG
2143 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2144 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2145 }
2146 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2147 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2148 }
2149 #endif
2150 if (src->isProtected() && !dst->isProtected()) {
2151 SkDebugf("Can't copy from protected memory to non-protected");
2152 return false;
2153 }
2154
2155 int dstSampleCnt = get_surface_sample_cnt(dst);
2156 int srcSampleCnt = get_surface_sample_cnt(src);
2157
2158 GrVkImage* dstImage;
2159 GrVkImage* srcImage;
2160 GrRenderTarget* dstRT = dst->asRenderTarget();
2161 if (dstRT) {
2162 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2163 if (vkRT->wrapsSecondaryCommandBuffer()) {
2164 return false;
2165 }
2166 dstImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
2167 } else {
2168 SkASSERT(dst->asTexture());
2169 dstImage = static_cast<GrVkTexture*>(dst->asTexture());
2170 }
2171 GrRenderTarget* srcRT = src->asRenderTarget();
2172 if (srcRT) {
2173 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2174 srcImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
2175 } else {
2176 SkASSERT(src->asTexture());
2177 srcImage = static_cast<GrVkTexture*>(src->asTexture());
2178 }
2179
2180 VkFormat dstFormat = dstImage->imageFormat();
2181 VkFormat srcFormat = srcImage->imageFormat();
2182
2183 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2184 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2185
2186 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2187 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2188 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2189 return true;
2190 }
2191
2192 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2193 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2194 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2195 return true;
2196 }
2197
2198 if (this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
2199 dstHasYcbcr, srcFormat, srcSampleCnt,
2200 srcImage->isLinearTiled(), srcHasYcbcr)) {
2201 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
2202 return true;
2203 }
2204
2205 return false;
2206 }
2207
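// Reads pixels back by copying the image into a transfer buffer, force-submitting the command
// buffer, and then copying the mapped buffer contents into the caller's buffer. RGB_888x surfaces
// are first copied into a temporary RGBA_8888 texture render target since VK_FORMAT_R8G8B8_UNORM
// stores only 24 bits per pixel.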
2208 bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
2209 GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
2210 size_t rowBytes) {
2211 if (surface->isProtected()) {
2212 return false;
2213 }
2214
2215 if (surfaceColorType != dstColorType) {
2216 return false;
2217 }
2218
2219 GrVkImage* image = nullptr;
2220 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2221 if (rt) {
2222 // Reading from render targets that wrap a secondary command buffer is not allowed since
2223 // it would require us to know the VkImage, which we don't have, as well as need us to
2224 // stop and start the VkRenderPass which we don't have access to.
2225 if (rt->wrapsSecondaryCommandBuffer()) {
2226 return false;
2227 }
2228 image = rt;
2229 } else {
2230 image = static_cast<GrVkTexture*>(surface->asTexture());
2231 }
2232
2233 if (!image) {
2234 return false;
2235 }
2236
2237 // Skia's RGB_888x color type, which we map to the vulkan R8G8B8_UNORM, expects the data to be
2238 // 32 bits, but the Vulkan format is only 24. So we first copy the surface into an R8G8B8A8
2239 // image and then do the read pixels from that.
2240 sk_sp<GrVkTextureRenderTarget> copySurface;
2241 if (dstColorType == GrColorType::kRGB_888x && image->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
2242 int srcSampleCount = 0;
2243 if (rt) {
2244 srcSampleCount = rt->numSamples();
2245 }
2246 bool srcHasYcbcr = image->ycbcrConversionInfo().isValid();
2247 if (!this->vkCaps().canCopyAsBlit(VK_FORMAT_R8G8B8A8_UNORM, 1, false, false,
2248 image->imageFormat(), srcSampleCount,
2249 image->isLinearTiled(), srcHasYcbcr)) {
2250 return false;
2251 }
2252
2253 // Make a new surface that is RGBA to copy the RGB surface into.
2254 GrSurfaceDesc surfDesc;
2255 surfDesc.fWidth = width;
2256 surfDesc.fHeight = height;
2257 surfDesc.fConfig = kRGBA_8888_GrPixelConfig;
2258
2259 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2260 VK_IMAGE_USAGE_SAMPLED_BIT |
2261 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2262 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2263
2264 GrVkImage::ImageDesc imageDesc;
2265 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
2266 imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
2267 imageDesc.fWidth = width;
2268 imageDesc.fHeight = height;
2269 imageDesc.fLevels = 1;
2270 imageDesc.fSamples = 1;
2271 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
2272 imageDesc.fUsageFlags = usageFlags;
2273 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
2274
2275 copySurface = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
2276 this, SkBudgeted::kYes, surfDesc, 1, imageDesc, GrMipMapsStatus::kNotAllocated);
2277 if (!copySurface) {
2278 return false;
2279 }
2280
2281 SkIRect srcRect = SkIRect::MakeXYWH(left, top, width, height);
2282 SkAssertResult(this->copySurface(copySurface.get(), surface, srcRect, SkIPoint::Make(0,0)));
2283
2284 top = 0;
2285 left = 0;
2286 dstColorType = GrColorType::kRGBA_8888;
2287 image = copySurface.get();
2288 }
2289
2290 // Change layout of our target so it can be used as copy
2291 image->setImageLayout(this,
2292 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2293 VK_ACCESS_TRANSFER_READ_BIT,
2294 VK_PIPELINE_STAGE_TRANSFER_BIT,
2295 false);
2296
2297 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2298 size_t tightRowBytes = bpp * width;
2299
2300 VkBufferImageCopy region;
2301 memset(&region, 0, sizeof(VkBufferImageCopy));
2302
2303 bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
2304 if (copyFromOrigin) {
2305 region.imageOffset = { 0, 0, 0 };
2306 region.imageExtent = { (uint32_t)(left + width), (uint32_t)(top + height), 1 };
2307 } else {
2308 VkOffset3D offset = { left, top, 0 };
2309 region.imageOffset = offset;
2310 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
2311 }
2312
2313 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2314 size_t imageRows = region.imageExtent.height;
2315 auto transferBuffer = sk_sp<GrVkTransferBuffer>(
2316 static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
2317 GrGpuBufferType::kXferGpuToCpu,
2318 kStream_GrAccessPattern)
2319 .release()));
2320
2321 // Copy the image to a buffer so we can map it to cpu memory
2322 region.bufferOffset = transferBuffer->offset();
2323 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2324 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2325 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2326
2327 fCurrentCmdBuffer->copyImageToBuffer(this,
2328 image,
2329 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2330 transferBuffer.get(),
2331 1,
2332 &region);
2333
2334 // make sure the copy to buffer has finished
2335 transferBuffer->addMemoryBarrier(this,
2336 VK_ACCESS_TRANSFER_WRITE_BIT,
2337 VK_ACCESS_HOST_READ_BIT,
2338 VK_PIPELINE_STAGE_TRANSFER_BIT,
2339 VK_PIPELINE_STAGE_HOST_BIT,
2340 false);
2341
2342 // We need to submit the current command buffer to the Queue and make sure it finishes before
2343 // we can copy the data out of the buffer.
2344 this->submitCommandBuffer(kForce_SyncQueue);
2345 void* mappedMemory = transferBuffer->map();
2346 const GrVkAlloc& transAlloc = transferBuffer->alloc();
2347 GrVkMemory::InvalidateMappedAlloc(this, transAlloc, 0, transAlloc.fSize);
2348
2349 if (copyFromOrigin) {
2350 uint32_t skipRows = region.imageExtent.height - height;
2351 mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
2352 }
2353
2354 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);
2355
2356 transferBuffer->unmap();
2357 return true;
2358 }
2359
2360 // The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
2361 // of the granularity. The width must also be a multiple of the granularity or equal to the width
2362 // of the entire attachment. Similar requirements apply to the y and height components.
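// For example, with granularity.width == 64, maxWidth == 1000, and srcBounds spanning [10, 100)
// horizontally, fRight is rounded up to 128 and fLeft down to 0, so the adjusted bounds land on
// granularity multiples while still covering the source bounds.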
2363 void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
2364 const VkExtent2D& granularity, int maxWidth, int maxHeight) {
2365 // Adjust Width
2366 if ((0 != granularity.width && 1 != granularity.width)) {
2367 // Start with the right side of the rect so we know if we end up going past the maxWidth.
2368 int rightAdj = srcBounds.fRight % granularity.width;
2369 if (rightAdj != 0) {
2370 rightAdj = granularity.width - rightAdj;
2371 }
2372 dstBounds->fRight = srcBounds.fRight + rightAdj;
2373 if (dstBounds->fRight > maxWidth) {
2374 dstBounds->fRight = maxWidth;
2375 dstBounds->fLeft = 0;
2376 } else {
2377 dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
2378 }
2379 } else {
2380 dstBounds->fLeft = srcBounds.fLeft;
2381 dstBounds->fRight = srcBounds.fRight;
2382 }
2383
2384 // Adjust height
2385 if ((0 != granularity.height && 1 != granularity.height)) {
2386 // Start with the bottom side of the rect so we know if we end up going past the maxHeight.
2387 int bottomAdj = srcBounds.fBottom % granularity.height;
2388 if (bottomAdj != 0) {
2389 bottomAdj = granularity.height - bottomAdj;
2390 }
2391 dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
2392 if (dstBounds->fBottom > maxHeight) {
2393 dstBounds->fBottom = maxHeight;
2394 dstBounds->fTop = 0;
2395 } else {
2396 dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
2397 }
2398 } else {
2399 dstBounds->fTop = srcBounds.fTop;
2400 dstBounds->fBottom = srcBounds.fBottom;
2401 }
2402 }
2403
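// Begins the render pass on the current command buffer. The requested bounds are converted to
// Vulkan's native (top-left) space and, when required, snapped to the render pass's render-area
// granularity via adjust_bounds_to_granularity above.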
2404 void GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2405 const VkClearValue* colorClear,
2406 GrVkRenderTarget* target, GrSurfaceOrigin origin,
2407 const SkIRect& bounds, bool forSecondaryCB) {
2408 SkASSERT (!target->wrapsSecondaryCommandBuffer());
2409 auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, target->height(), bounds);
2410
2411 // The bounds we use for the render pass should be of the granularity supported
2412 // by the device.
2413 const VkExtent2D& granularity = renderPass->granularity();
2414 SkIRect adjustedBounds;
2415 if ((0 != granularity.width && 1 != granularity.width) ||
2416 (0 != granularity.height && 1 != granularity.height)) {
2417 adjust_bounds_to_granularity(&adjustedBounds, nativeBounds.asSkIRect(), granularity,
2418 target->width(), target->height());
2419 } else {
2420 adjustedBounds = nativeBounds.asSkIRect();
2421 }
2422
2423 #ifdef SK_DEBUG
2424 uint32_t index;
2425 bool result = renderPass->colorAttachmentIndex(&index);
2426 SkASSERT(result && 0 == index);
2427 result = renderPass->stencilAttachmentIndex(&index);
2428 if (result) {
2429 SkASSERT(1 == index);
2430 }
2431 #endif
2432 VkClearValue clears[2];
2433 clears[0].color = colorClear->color;
2434 clears[1].depthStencil.depth = 0.0f;
2435 clears[1].depthStencil.stencil = 0;
2436
2437 fCurrentCmdBuffer->beginRenderPass(this, renderPass, clears, *target, adjustedBounds,
2438 forSecondaryCB);
2439 }
2440
2441 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2442 const SkIRect& bounds) {
2443 fCurrentCmdBuffer->endRenderPass(this);
2444 this->didWriteToSurface(target, origin, &bounds);
2445 }
2446
2447 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2448 fCurrentCmdBuffer->executeCommands(this, std::move(buffer));
2449 }
2450
2451 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2452 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2453
2454 fCachedOpsRenderPass->submit();
2455 fCachedOpsRenderPass->reset();
2456 }
2457
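// Inserts a fence by submitting an empty batch to the queue with the fence attached; the fence
// signals once the work submitted to the queue before it has completed.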
2458 GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
2459 VkFenceCreateInfo createInfo;
2460 memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
2461 createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
2462 createInfo.pNext = nullptr;
2463 createInfo.flags = 0;
2464 VkFence fence = VK_NULL_HANDLE;
2465
2466 VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
2467 VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));
2468
2469 GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
2470 return (GrFence)fence;
2471 }
2472
2473 bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
2474 SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
2475
2476 VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
2477 return (VK_SUCCESS == result);
2478 }
2479
2480 void GrVkGpu::deleteFence(GrFence fence) const {
2481 VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
2482 }
2483
2484 sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
2485 return GrVkSemaphore::Make(this, isOwned);
2486 }
2487
2488 sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2489 GrResourceProvider::SemaphoreWrapType wrapType,
2490 GrWrapOwnership ownership) {
2491 return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
2492 }
2493
2494 void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
2495 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
2496
2497 GrVkSemaphore::Resource* resource = vkSem->getResource();
2498 if (resource->shouldSignal()) {
2499 resource->ref();
2500 fSemaphoresToSignal.push_back(resource);
2501 }
2502 }
2503
2504 void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
2505 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
2506
2507 GrVkSemaphore::Resource* resource = vkSem->getResource();
2508 if (resource->shouldWait()) {
2509 resource->ref();
2510 fSemaphoresToWaitOn.push_back(resource);
2511 }
2512 }
2513
2514 sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2515 SkASSERT(texture);
2516 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
2517 vkTexture->setImageLayout(this,
2518 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2519 VK_ACCESS_SHADER_READ_BIT,
2520 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2521 false);
2522 this->submitCommandBuffer(kSkip_SyncQueue);
2523
2524 // The image layout change serves as a barrier, so no semaphore is needed.
2525 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2526 // thread safe so that only the first thread that tries to use the semaphore actually submits
2527 // it. This additionally would also require thread safety in command buffer submissions to
2528 // queues in general.
2529 return nullptr;
2530 }
2531
2532 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2533 fDrawables.emplace_back(std::move(drawable));
2534 }
2535
2536 uint32_t GrVkGpu::getExtraSamplerKeyForProgram(const GrSamplerState& samplerState,
2537 const GrBackendFormat& format) {
2538 const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
2539 SkASSERT(ycbcrInfo);
2540 if (!ycbcrInfo->isValid()) {
2541 return 0;
2542 }
2543
2544 const GrVkSampler* sampler = this->resourceProvider().findOrCreateCompatibleSampler(
2545 samplerState, *ycbcrInfo);
2546
2547 uint32_t result = sampler->uniqueID();
2548
2549 sampler->unref(this);
2550
2551 return result;
2552 }
2553
2554 void GrVkGpu::storeVkPipelineCacheData() {
2555 if (this->getContext()->priv().getPersistentCache()) {
2556 this->resourceProvider().storePipelineCacheData();
2557 }
2558 }
2559