1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/vk/GrVkCommandBuffer.h"
9 
10 #include "include/core/SkRect.h"
11 #include "src/gpu/vk/GrVkCommandPool.h"
12 #include "src/gpu/vk/GrVkFramebuffer.h"
13 #include "src/gpu/vk/GrVkGpu.h"
14 #include "src/gpu/vk/GrVkImage.h"
15 #include "src/gpu/vk/GrVkImageView.h"
16 #include "src/gpu/vk/GrVkMeshBuffer.h"
17 #include "src/gpu/vk/GrVkPipeline.h"
18 #include "src/gpu/vk/GrVkPipelineState.h"
19 #include "src/gpu/vk/GrVkPipelineState.h"
20 #include "src/gpu/vk/GrVkRenderPass.h"
21 #include "src/gpu/vk/GrVkRenderTarget.h"
22 #include "src/gpu/vk/GrVkTransferBuffer.h"
23 #include "src/gpu/vk/GrVkUtil.h"
24 
invalidateState()25 void GrVkCommandBuffer::invalidateState() {
26     for (auto& boundInputBuffer : fBoundInputBuffers) {
27         boundInputBuffer = VK_NULL_HANDLE;
28     }
29     fBoundIndexBuffer = VK_NULL_HANDLE;
30 
31     memset(&fCachedViewport, 0, sizeof(VkViewport));
32     fCachedViewport.width = - 1.0f; // Viewport must have a width greater than 0
33 
34     memset(&fCachedScissor, 0, sizeof(VkRect2D));
35     fCachedScissor.offset.x = -1; // Scissor offset must be greater that 0 to be valid
36 
37     for (int i = 0; i < 4; ++i) {
38         fCachedBlendConstant[i] = -1.0;
39     }
40 }
41 
freeGPUData(const GrGpu * gpu,VkCommandPool cmdPool) const42 void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
43     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
44     SkASSERT(!fIsActive);
45     SkASSERT(!fTrackedResources.count());
46     SkASSERT(!fTrackedRecycledResources.count());
47     SkASSERT(!fTrackedGpuBuffers.count());
48     SkASSERT(!fTrackedGpuSurfaces.count());
49     SkASSERT(cmdPool != VK_NULL_HANDLE);
50     SkASSERT(!this->isWrapped());
51 
52     GrVkGpu* vkGpu = (GrVkGpu*)gpu;
53     GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));
54 
55     this->onFreeGPUData(vkGpu);
56 }
57 
releaseResources()58 void GrVkCommandBuffer::releaseResources() {
59     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
60     SkASSERT(!fIsActive);
61     for (int i = 0; i < fTrackedResources.count(); ++i) {
62         fTrackedResources[i]->notifyFinishedWithWorkOnGpu();
63         fTrackedResources[i]->unref();
64     }
65     for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
66         fTrackedRecycledResources[i]->notifyFinishedWithWorkOnGpu();
67         fTrackedRecycledResources[i]->recycle();
68     }
69 
70     if (++fNumResets > kNumRewindResetsBeforeFullReset) {
71         fTrackedResources.reset();
72         fTrackedRecycledResources.reset();
73         fTrackedResources.setReserve(kInitialTrackedResourcesCount);
74         fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
75         fNumResets = 0;
76     } else {
77         fTrackedResources.rewind();
78         fTrackedRecycledResources.rewind();
79     }
80 
81     fTrackedGpuBuffers.reset();
82     fTrackedGpuSurfaces.reset();
83 
84     this->invalidateState();
85 
86     this->onReleaseResources();
87 }
88 
89 ////////////////////////////////////////////////////////////////////////////////
90 // CommandBuffer commands
91 ////////////////////////////////////////////////////////////////////////////////
92 
// Records a memory barrier (image or buffer) into the pending batch. Barriers
// are not emitted immediately; they accumulate and are flushed together by
// submitPipelineBarriers() so back-to-back barriers collapse into a single
// vkCmdPipelineBarrier call. `barrier` points at a VkImageMemoryBarrier or
// VkBufferMemoryBarrier according to `barrierType`. If `resource` is non-null
// it is ref'd until the command buffer finishes on the GPU.
void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes but they require us to add more
    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
    // never have buffer barriers inside of a render pass. For now we will just assert that we are
    // not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        // Only a by-region, same-layout, same-queue barrier is expressible as a
        // subpass self-dependency, which is the one kind allowed in-pass.
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in current batch. If it does, then we must
        // submit the first batch because the vulkan spec does not define a specific ordering for
        // barriers submitted in the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
        for (int i = 0; i < fImageBarriers.count(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                // Closed-interval overlap test on the two mip-level ranges.
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    // Accumulate flags and stage masks for the whole pending batch.
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        // Keep the resource alive until this command buffer finishes on the GPU.
        this->addResource(resource);
    }
    if (fActiveRenderPass) {
        // In-pass (self-dependency) barriers cannot be batched past other
        // commands, so flush immediately.
        this->submitPipelineBarriers(gpu, true);
    }
}
162 
submitPipelineBarriers(const GrVkGpu * gpu,bool forSelfDependency)163 void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
164     SkASSERT(fIsActive);
165 
166     // Currently we never submit a pipeline barrier without at least one memory barrier.
167     if (fBufferBarriers.count() || fImageBarriers.count()) {
168         // For images we can have barriers inside of render passes but they require us to add more
169         // support in subpasses which need self dependencies to have barriers inside them. Also, we
170         // can never have buffer barriers inside of a render pass. For now we will just assert that
171         // we are not in a render pass.
172         SkASSERT(!fActiveRenderPass || forSelfDependency);
173         SkASSERT(!this->isWrapped());
174         SkASSERT(fSrcStageMask && fDstStageMask);
175 
176         VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
177         GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
178                 fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
179                 fBufferBarriers.count(), fBufferBarriers.begin(),
180                 fImageBarriers.count(), fImageBarriers.begin()));
181         fBufferBarriers.reset();
182         fImageBarriers.reset();
183         fBarriersByRegion = false;
184         fSrcStageMask = 0;
185         fDstStageMask = 0;
186     }
187     SkASSERT(!fBufferBarriers.count());
188     SkASSERT(!fImageBarriers.count());
189     SkASSERT(!fBarriersByRegion);
190     SkASSERT(!fSrcStageMask);
191     SkASSERT(!fDstStageMask);
192 }
193 
bindInputBuffer(GrVkGpu * gpu,uint32_t binding,sk_sp<const GrBuffer> buffer)194 void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
195                                         sk_sp<const GrBuffer> buffer) {
196     auto* vkMeshBuffer = static_cast<const GrVkMeshBuffer*>(buffer.get());
197     VkBuffer vkBuffer = vkMeshBuffer->buffer();
198     SkASSERT(VK_NULL_HANDLE != vkBuffer);
199     SkASSERT(binding < kMaxInputBuffers);
200     // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
201     // to know if we can skip binding or not.
202     if (vkBuffer != fBoundInputBuffers[binding]) {
203         VkDeviceSize offset = vkMeshBuffer->offset();
204         GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
205                                                             binding,
206                                                             1,
207                                                             &vkBuffer,
208                                                             &offset));
209         fBoundInputBuffers[binding] = vkBuffer;
210         this->addResource(vkMeshBuffer->resource());
211         this->addGrBuffer(std::move(buffer));
212     }
213 }
214 
bindIndexBuffer(GrVkGpu * gpu,sk_sp<const GrBuffer> buffer)215 void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
216     auto* vkMeshBuffer = static_cast<const GrVkMeshBuffer*>(buffer.get());
217     VkBuffer vkBuffer = vkMeshBuffer->buffer();
218     SkASSERT(VK_NULL_HANDLE != vkBuffer);
219     // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
220     // to know if we can skip binding or not.
221     if (vkBuffer != fBoundIndexBuffer) {
222         GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
223                                                           vkBuffer, vkMeshBuffer->offset(),
224                                                           VK_INDEX_TYPE_UINT16));
225         fBoundIndexBuffer = vkBuffer;
226         this->addResource(vkMeshBuffer->resource());
227         this->addGrBuffer(std::move(buffer));
228     }
229 }
230 
clearAttachments(const GrVkGpu * gpu,int numAttachments,const VkClearAttachment * attachments,int numRects,const VkClearRect * clearRects)231 void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
232                                          int numAttachments,
233                                          const VkClearAttachment* attachments,
234                                          int numRects,
235                                          const VkClearRect* clearRects) {
236     SkASSERT(fIsActive);
237     SkASSERT(fActiveRenderPass);
238     SkASSERT(numAttachments > 0);
239     SkASSERT(numRects > 0);
240 
241     this->addingWork(gpu);
242 
243 #ifdef SK_DEBUG
244     for (int i = 0; i < numAttachments; ++i) {
245         if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
246             uint32_t testIndex;
247             SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
248             SkASSERT(testIndex == attachments[i].colorAttachment);
249         }
250     }
251 #endif
252     GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
253                                                        numAttachments,
254                                                        attachments,
255                                                        numRects,
256                                                        clearRects));
257     if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
258         this->invalidateState();
259     }
260 }
261 
bindDescriptorSets(const GrVkGpu * gpu,GrVkPipelineState * pipelineState,VkPipelineLayout layout,uint32_t firstSet,uint32_t setCount,const VkDescriptorSet * descriptorSets,uint32_t dynamicOffsetCount,const uint32_t * dynamicOffsets)262 void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
263                                            GrVkPipelineState* pipelineState,
264                                            VkPipelineLayout layout,
265                                            uint32_t firstSet,
266                                            uint32_t setCount,
267                                            const VkDescriptorSet* descriptorSets,
268                                            uint32_t dynamicOffsetCount,
269                                            const uint32_t* dynamicOffsets) {
270     SkASSERT(fIsActive);
271     GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
272                                                          VK_PIPELINE_BIND_POINT_GRAPHICS,
273                                                          layout,
274                                                          firstSet,
275                                                          setCount,
276                                                          descriptorSets,
277                                                          dynamicOffsetCount,
278                                                          dynamicOffsets));
279 }
280 
bindPipeline(const GrVkGpu * gpu,const GrVkPipeline * pipeline)281 void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
282     SkASSERT(fIsActive);
283     GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
284                                                    VK_PIPELINE_BIND_POINT_GRAPHICS,
285                                                    pipeline->pipeline()));
286     this->addResource(pipeline);
287 }
288 
drawIndexed(const GrVkGpu * gpu,uint32_t indexCount,uint32_t instanceCount,uint32_t firstIndex,int32_t vertexOffset,uint32_t firstInstance)289 void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
290                                     uint32_t indexCount,
291                                     uint32_t instanceCount,
292                                     uint32_t firstIndex,
293                                     int32_t vertexOffset,
294                                     uint32_t firstInstance) {
295     SkASSERT(fIsActive);
296     SkASSERT(fActiveRenderPass);
297     this->addingWork(gpu);
298     GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
299                                                   indexCount,
300                                                   instanceCount,
301                                                   firstIndex,
302                                                   vertexOffset,
303                                                   firstInstance));
304 }
305 
draw(const GrVkGpu * gpu,uint32_t vertexCount,uint32_t instanceCount,uint32_t firstVertex,uint32_t firstInstance)306 void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
307                              uint32_t vertexCount,
308                              uint32_t instanceCount,
309                              uint32_t firstVertex,
310                              uint32_t firstInstance) {
311     SkASSERT(fIsActive);
312     SkASSERT(fActiveRenderPass);
313     this->addingWork(gpu);
314     GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
315                                            vertexCount,
316                                            instanceCount,
317                                            firstVertex,
318                                            firstInstance));
319 }
320 
drawIndirect(const GrVkGpu * gpu,const GrVkMeshBuffer * indirectBuffer,VkDeviceSize offset,uint32_t drawCount,uint32_t stride)321 void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
322                                      const GrVkMeshBuffer* indirectBuffer,
323                                      VkDeviceSize offset,
324                                      uint32_t drawCount,
325                                      uint32_t stride) {
326     SkASSERT(fIsActive);
327     SkASSERT(fActiveRenderPass);
328     SkASSERT(!indirectBuffer->isCpuBuffer());
329     this->addingWork(gpu);
330     this->addResource(indirectBuffer->resource());
331     GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
332                                                    indirectBuffer->buffer(),
333                                                    offset,
334                                                    drawCount,
335                                                    stride));
336 }
337 
drawIndexedIndirect(const GrVkGpu * gpu,const GrVkMeshBuffer * indirectBuffer,VkDeviceSize offset,uint32_t drawCount,uint32_t stride)338 void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
339                                             const GrVkMeshBuffer* indirectBuffer,
340                                             VkDeviceSize offset,
341                                             uint32_t drawCount,
342                                             uint32_t stride) {
343     SkASSERT(fIsActive);
344     SkASSERT(fActiveRenderPass);
345     SkASSERT(!indirectBuffer->isCpuBuffer());
346     this->addingWork(gpu);
347     this->addResource(indirectBuffer->resource());
348     GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
349                                                           indirectBuffer->buffer(),
350                                                           offset,
351                                                           drawCount,
352                                                           stride));
353 }
354 
setViewport(const GrVkGpu * gpu,uint32_t firstViewport,uint32_t viewportCount,const VkViewport * viewports)355 void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
356                                     uint32_t firstViewport,
357                                     uint32_t viewportCount,
358                                     const VkViewport* viewports) {
359     SkASSERT(fIsActive);
360     SkASSERT(1 == viewportCount);
361     if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
362         GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
363                                                       firstViewport,
364                                                       viewportCount,
365                                                       viewports));
366         fCachedViewport = viewports[0];
367     }
368 }
369 
setScissor(const GrVkGpu * gpu,uint32_t firstScissor,uint32_t scissorCount,const VkRect2D * scissors)370 void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
371                                    uint32_t firstScissor,
372                                    uint32_t scissorCount,
373                                    const VkRect2D* scissors) {
374     SkASSERT(fIsActive);
375     SkASSERT(1 == scissorCount);
376     if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
377         GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
378                                                      firstScissor,
379                                                      scissorCount,
380                                                      scissors));
381         fCachedScissor = scissors[0];
382     }
383 }
384 
setBlendConstants(const GrVkGpu * gpu,const float blendConstants[4])385 void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
386                                           const float blendConstants[4]) {
387     SkASSERT(fIsActive);
388     if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
389         GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
390         memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
391     }
392 }
393 
// Called before recording any command that does real work. Flushes the pending
// pipeline-barrier batch first so those barriers are ordered before the new
// work, then marks this command buffer as having content worth submitting.
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}
398 
399 ///////////////////////////////////////////////////////////////////////////////
400 // PrimaryCommandBuffer
401 ////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of before the
    // primary command buffer is destroyed.
    SkASSERT(!fActiveRenderPass);
}
406 
Create(GrVkGpu * gpu,VkCommandPool cmdPool)407 GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
408                                                            VkCommandPool cmdPool) {
409     const VkCommandBufferAllocateInfo cmdInfo = {
410         VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
411         nullptr,                                          // pNext
412         cmdPool,                                          // commandPool
413         VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
414         1                                                 // bufferCount
415     };
416 
417     VkCommandBuffer cmdBuffer;
418     VkResult err;
419     GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
420     if (err) {
421         return nullptr;
422     }
423     return new GrVkPrimaryCommandBuffer(cmdBuffer);
424 }
425 
begin(GrVkGpu * gpu)426 void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
427     SkASSERT(!fIsActive);
428     VkCommandBufferBeginInfo cmdBufferBeginInfo;
429     memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
430     cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
431     cmdBufferBeginInfo.pNext = nullptr;
432     cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
433     cmdBufferBeginInfo.pInheritanceInfo = nullptr;
434 
435     GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
436     fIsActive = true;
437 }
438 
end(GrVkGpu * gpu)439 void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
440     SkASSERT(fIsActive);
441     SkASSERT(!fActiveRenderPass);
442 
443     this->submitPipelineBarriers(gpu);
444 
445     GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
446     this->invalidateState();
447     fIsActive = false;
448     fHasWork = false;
449 }
450 
// Begins `renderPass` (which must be compatible with `target`) on this primary
// command buffer. Returns false if the target's framebuffer cannot be
// obtained. On success the render pass becomes the active pass and both the
// pass and the target's attachments are ref'd for the lifetime of this
// command buffer. `forSecondaryCB` selects whether subpass contents come from
// secondary command buffers or are recorded inline.
bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue clearValues[],
                                               GrVkRenderTarget* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(*target, renderPass->selfDependencyFlags()));

    const GrVkFramebuffer* framebuffer = target->getFramebuffer(renderPass->hasStencilAttachment(),
                                                                renderPass->selfDependencyFlags());
    if (!framebuffer) {
        return false;
    }

    // Flush pending barriers before the pass begins; arbitrary barriers are
    // not allowed inside a render pass.
    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft , bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    // Keep the render pass and the target's attachments alive until the GPU
    // finishes with this command buffer.
    this->addResource(renderPass);
    target->addResources(*this, renderPass->hasStencilAttachment(),
                         renderPass->selfDependencyFlags());
    return true;
}
493 
endRenderPass(const GrVkGpu * gpu)494 void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
495     SkASSERT(fIsActive);
496     SkASSERT(fActiveRenderPass);
497     this->addingWork(gpu);
498     GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
499     fActiveRenderPass = nullptr;
500 }
501 
executeCommands(const GrVkGpu * gpu,std::unique_ptr<GrVkSecondaryCommandBuffer> buffer)502 void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
503                                                std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
504     // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
505     // if the command pools both were created from were created with the same queue family. However,
506     // we currently always create them from the same pool.
507     SkASSERT(fIsActive);
508     SkASSERT(!buffer->fIsActive);
509     SkASSERT(fActiveRenderPass);
510     SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));
511 
512     this->addingWork(gpu);
513 
514     GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
515     fSecondaryCommandBuffers.push_back(std::move(buffer));
516     // When executing a secondary command buffer all state (besides render pass state) becomes
517     // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
518     this->invalidateState();
519 }
520 
submit_to_queue(GrVkGpu * gpu,VkQueue queue,VkFence fence,uint32_t waitCount,const VkSemaphore * waitSemaphores,const VkPipelineStageFlags * waitStages,uint32_t commandBufferCount,const VkCommandBuffer * commandBuffers,uint32_t signalCount,const VkSemaphore * signalSemaphores,GrProtected protectedContext)521 static bool submit_to_queue(GrVkGpu* gpu,
522                             VkQueue queue,
523                             VkFence fence,
524                             uint32_t waitCount,
525                             const VkSemaphore* waitSemaphores,
526                             const VkPipelineStageFlags* waitStages,
527                             uint32_t commandBufferCount,
528                             const VkCommandBuffer* commandBuffers,
529                             uint32_t signalCount,
530                             const VkSemaphore* signalSemaphores,
531                             GrProtected protectedContext) {
532     VkProtectedSubmitInfo protectedSubmitInfo;
533     if (protectedContext == GrProtected::kYes) {
534         memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
535         protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
536         protectedSubmitInfo.pNext = nullptr;
537         protectedSubmitInfo.protectedSubmit = VK_TRUE;
538     }
539 
540     VkSubmitInfo submitInfo;
541     memset(&submitInfo, 0, sizeof(VkSubmitInfo));
542     submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
543     submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
544     submitInfo.waitSemaphoreCount = waitCount;
545     submitInfo.pWaitSemaphores = waitSemaphores;
546     submitInfo.pWaitDstStageMask = waitStages;
547     submitInfo.commandBufferCount = commandBufferCount;
548     submitInfo.pCommandBuffers = commandBuffers;
549     submitInfo.signalSemaphoreCount = signalCount;
550     submitInfo.pSignalSemaphores = signalSemaphores;
551     VkResult result;
552     GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
553     return result == VK_SUCCESS;
554 }
555 
submitToQueue(GrVkGpu * gpu,VkQueue queue,SkTArray<GrVkSemaphore::Resource * > & signalSemaphores,SkTArray<GrVkSemaphore::Resource * > & waitSemaphores)556 bool GrVkPrimaryCommandBuffer::submitToQueue(
557         GrVkGpu* gpu,
558         VkQueue queue,
559         SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
560         SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
561     SkASSERT(!fIsActive);
562 
563     VkResult err;
564     if (VK_NULL_HANDLE == fSubmitFence) {
565         VkFenceCreateInfo fenceInfo;
566         memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
567         fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
568         GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
569                                                 &fSubmitFence));
570         if (err) {
571             fSubmitFence = VK_NULL_HANDLE;
572             return false;
573         }
574     } else {
575         // This cannot return DEVICE_LOST so we assert we succeeded.
576         GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
577         SkASSERT(err == VK_SUCCESS);
578     }
579 
580     int signalCount = signalSemaphores.count();
581     int waitCount = waitSemaphores.count();
582 
583     bool submitted = false;
584 
585     if (0 == signalCount && 0 == waitCount) {
586         // This command buffer has no dependent semaphores so we can simply just submit it to the
587         // queue with no worries.
588         submitted = submit_to_queue(
589                 gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
590                 gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
591     } else {
592         SkTArray<VkSemaphore> vkSignalSems(signalCount);
593         for (int i = 0; i < signalCount; ++i) {
594             if (signalSemaphores[i]->shouldSignal()) {
595                 this->addResource(signalSemaphores[i]);
596                 vkSignalSems.push_back(signalSemaphores[i]->semaphore());
597             }
598         }
599 
600         SkTArray<VkSemaphore> vkWaitSems(waitCount);
601         SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
602         for (int i = 0; i < waitCount; ++i) {
603             if (waitSemaphores[i]->shouldWait()) {
604                 this->addResource(waitSemaphores[i]);
605                 vkWaitSems.push_back(waitSemaphores[i]->semaphore());
606                 vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
607             }
608         }
609         submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.count(),
610                                     vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
611                                     vkSignalSems.count(), vkSignalSems.begin(),
612                                     gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
613         if (submitted) {
614             for (int i = 0; i < signalCount; ++i) {
615                 signalSemaphores[i]->markAsSignaled();
616             }
617             for (int i = 0; i < waitCount; ++i) {
618                 waitSemaphores[i]->markAsWaited();
619             }
620         }
621     }
622 
623     if (!submitted) {
624         // Destroy the fence or else we will try to wait forever for it to finish.
625         GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
626         fSubmitFence = VK_NULL_HANDLE;
627         return false;
628     }
629     return true;
630 }
631 
forceSync(GrVkGpu * gpu)632 void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
633     SkASSERT(fSubmitFence != VK_NULL_HANDLE);
634     GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
635 }
636 
finished(GrVkGpu * gpu)637 bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
638     SkASSERT(!fIsActive);
639     if (VK_NULL_HANDLE == fSubmitFence) {
640         return true;
641     }
642 
643     VkResult err;
644     GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
645     switch (err) {
646         case VK_SUCCESS:
647         case VK_ERROR_DEVICE_LOST:
648             return true;
649 
650         case VK_NOT_READY:
651             return false;
652 
653         default:
654             SkDebugf("Error getting fence status: %d\n", err);
655             SK_ABORT("Got an invalid fence status");
656             return false;
657     }
658 }
659 
addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc)660 void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
661     fFinishedProcs.push_back(std::move(finishedProc));
662 }
663 
onReleaseResources()664 void GrVkPrimaryCommandBuffer::onReleaseResources() {
665     for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
666         fSecondaryCommandBuffers[i]->releaseResources();
667     }
668     this->callFinishedProcs();
669 }
670 
recycleSecondaryCommandBuffers(GrVkCommandPool * cmdPool)671 void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
672     for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
673         fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
674     }
675     fSecondaryCommandBuffers.reset();
676 }
677 
copyImage(const GrVkGpu * gpu,GrVkImage * srcImage,VkImageLayout srcLayout,GrVkImage * dstImage,VkImageLayout dstLayout,uint32_t copyRegionCount,const VkImageCopy * copyRegions)678 void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
679                                          GrVkImage* srcImage,
680                                          VkImageLayout srcLayout,
681                                          GrVkImage* dstImage,
682                                          VkImageLayout dstLayout,
683                                          uint32_t copyRegionCount,
684                                          const VkImageCopy* copyRegions) {
685     SkASSERT(fIsActive);
686     SkASSERT(!fActiveRenderPass);
687     this->addingWork(gpu);
688     this->addResource(srcImage->resource());
689     this->addResource(dstImage->resource());
690     GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
691                                                 srcImage->image(),
692                                                 srcLayout,
693                                                 dstImage->image(),
694                                                 dstLayout,
695                                                 copyRegionCount,
696                                                 copyRegions));
697 }
698 
blitImage(const GrVkGpu * gpu,const GrManagedResource * srcResource,VkImage srcImage,VkImageLayout srcLayout,const GrManagedResource * dstResource,VkImage dstImage,VkImageLayout dstLayout,uint32_t blitRegionCount,const VkImageBlit * blitRegions,VkFilter filter)699 void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
700                                          const GrManagedResource* srcResource,
701                                          VkImage srcImage,
702                                          VkImageLayout srcLayout,
703                                          const GrManagedResource* dstResource,
704                                          VkImage dstImage,
705                                          VkImageLayout dstLayout,
706                                          uint32_t blitRegionCount,
707                                          const VkImageBlit* blitRegions,
708                                          VkFilter filter) {
709     SkASSERT(fIsActive);
710     SkASSERT(!fActiveRenderPass);
711     this->addingWork(gpu);
712     this->addResource(srcResource);
713     this->addResource(dstResource);
714     GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
715                                                 srcImage,
716                                                 srcLayout,
717                                                 dstImage,
718                                                 dstLayout,
719                                                 blitRegionCount,
720                                                 blitRegions,
721                                                 filter));
722 }
723 
blitImage(const GrVkGpu * gpu,const GrVkImage & srcImage,const GrVkImage & dstImage,uint32_t blitRegionCount,const VkImageBlit * blitRegions,VkFilter filter)724 void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
725                                          const GrVkImage& srcImage,
726                                          const GrVkImage& dstImage,
727                                          uint32_t blitRegionCount,
728                                          const VkImageBlit* blitRegions,
729                                          VkFilter filter) {
730     this->blitImage(gpu,
731                     srcImage.resource(),
732                     srcImage.image(),
733                     srcImage.currentLayout(),
734                     dstImage.resource(),
735                     dstImage.image(),
736                     dstImage.currentLayout(),
737                     blitRegionCount,
738                     blitRegions,
739                     filter);
740 }
741 
742 
copyImageToBuffer(const GrVkGpu * gpu,GrVkImage * srcImage,VkImageLayout srcLayout,GrVkTransferBuffer * dstBuffer,uint32_t copyRegionCount,const VkBufferImageCopy * copyRegions)743 void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
744                                                  GrVkImage* srcImage,
745                                                  VkImageLayout srcLayout,
746                                                  GrVkTransferBuffer* dstBuffer,
747                                                  uint32_t copyRegionCount,
748                                                  const VkBufferImageCopy* copyRegions) {
749     SkASSERT(fIsActive);
750     SkASSERT(!fActiveRenderPass);
751     this->addingWork(gpu);
752     this->addResource(srcImage->resource());
753     this->addResource(dstBuffer->resource());
754     GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
755                                                         srcImage->image(),
756                                                         srcLayout,
757                                                         dstBuffer->buffer(),
758                                                         copyRegionCount,
759                                                         copyRegions));
760 }
761 
copyBufferToImage(const GrVkGpu * gpu,GrVkTransferBuffer * srcBuffer,GrVkImage * dstImage,VkImageLayout dstLayout,uint32_t copyRegionCount,const VkBufferImageCopy * copyRegions)762 void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
763                                                  GrVkTransferBuffer* srcBuffer,
764                                                  GrVkImage* dstImage,
765                                                  VkImageLayout dstLayout,
766                                                  uint32_t copyRegionCount,
767                                                  const VkBufferImageCopy* copyRegions) {
768     SkASSERT(fIsActive);
769     SkASSERT(!fActiveRenderPass);
770     this->addingWork(gpu);
771     this->addResource(srcBuffer->resource());
772     this->addResource(dstImage->resource());
773     GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
774                                                         srcBuffer->buffer(),
775                                                         dstImage->image(),
776                                                         dstLayout,
777                                                         copyRegionCount,
778                                                         copyRegions));
779 }
780 
781 
copyBuffer(GrVkGpu * gpu,GrVkBuffer * srcBuffer,GrVkBuffer * dstBuffer,uint32_t regionCount,const VkBufferCopy * regions)782 void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
783                                           GrVkBuffer* srcBuffer,
784                                           GrVkBuffer* dstBuffer,
785                                           uint32_t regionCount,
786                                           const VkBufferCopy* regions) {
787     SkASSERT(fIsActive);
788     SkASSERT(!fActiveRenderPass);
789     this->addingWork(gpu);
790 #ifdef SK_DEBUG
791     for (uint32_t i = 0; i < regionCount; ++i) {
792         const VkBufferCopy& region = regions[i];
793         SkASSERT(region.size > 0);
794         SkASSERT(region.srcOffset < srcBuffer->size());
795         SkASSERT(region.dstOffset < dstBuffer->size());
796         SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
797         SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
798     }
799 #endif
800     this->addResource(srcBuffer->resource());
801     this->addResource(dstBuffer->resource());
802     GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
803                                                  srcBuffer->buffer(),
804                                                  dstBuffer->buffer(),
805                                                  regionCount,
806                                                  regions));
807 }
808 
updateBuffer(GrVkGpu * gpu,GrVkBuffer * dstBuffer,VkDeviceSize dstOffset,VkDeviceSize dataSize,const void * data)809 void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
810                                             GrVkBuffer* dstBuffer,
811                                             VkDeviceSize dstOffset,
812                                             VkDeviceSize dataSize,
813                                             const void* data) {
814     SkASSERT(fIsActive);
815     SkASSERT(!fActiveRenderPass);
816     SkASSERT(0 == (dstOffset & 0x03));   // four byte aligned
817     // TODO: handle larger transfer sizes
818     SkASSERT(dataSize <= 65536);
819     SkASSERT(0 == (dataSize & 0x03));    // four byte aligned
820     this->addingWork(gpu);
821     this->addResource(dstBuffer->resource());
822     GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
823                                                    dstBuffer->buffer(),
824                                                    dstOffset,
825                                                    dataSize,
826                                                    (const uint32_t*) data));
827 }
828 
clearColorImage(const GrVkGpu * gpu,GrVkImage * image,const VkClearColorValue * color,uint32_t subRangeCount,const VkImageSubresourceRange * subRanges)829 void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
830                                                GrVkImage* image,
831                                                const VkClearColorValue* color,
832                                                uint32_t subRangeCount,
833                                                const VkImageSubresourceRange* subRanges) {
834     SkASSERT(fIsActive);
835     SkASSERT(!fActiveRenderPass);
836     this->addingWork(gpu);
837     this->addResource(image->resource());
838     GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
839                                                       image->image(),
840                                                       image->currentLayout(),
841                                                       color,
842                                                       subRangeCount,
843                                                       subRanges));
844 }
845 
clearDepthStencilImage(const GrVkGpu * gpu,GrVkImage * image,const VkClearDepthStencilValue * color,uint32_t subRangeCount,const VkImageSubresourceRange * subRanges)846 void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
847                                                       GrVkImage* image,
848                                                       const VkClearDepthStencilValue* color,
849                                                       uint32_t subRangeCount,
850                                                       const VkImageSubresourceRange* subRanges) {
851     SkASSERT(fIsActive);
852     SkASSERT(!fActiveRenderPass);
853     this->addingWork(gpu);
854     this->addResource(image->resource());
855     GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
856                                                              image->image(),
857                                                              image->currentLayout(),
858                                                              color,
859                                                              subRangeCount,
860                                                              subRanges));
861 }
862 
resolveImage(GrVkGpu * gpu,const GrVkImage & srcImage,const GrVkImage & dstImage,uint32_t regionCount,const VkImageResolve * regions)863 void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
864                                             const GrVkImage& srcImage,
865                                             const GrVkImage& dstImage,
866                                             uint32_t regionCount,
867                                             const VkImageResolve* regions) {
868     SkASSERT(fIsActive);
869     SkASSERT(!fActiveRenderPass);
870 
871     this->addingWork(gpu);
872     this->addResource(srcImage.resource());
873     this->addResource(dstImage.resource());
874 
875     GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
876                                                    srcImage.image(),
877                                                    srcImage.currentLayout(),
878                                                    dstImage.image(),
879                                                    dstImage.currentLayout(),
880                                                    regionCount,
881                                                    regions));
882 }
883 
onFreeGPUData(const GrVkGpu * gpu) const884 void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
885     SkASSERT(!fActiveRenderPass);
886     // Destroy the fence, if any
887     if (VK_NULL_HANDLE != fSubmitFence) {
888         GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
889     }
890     SkASSERT(!fSecondaryCommandBuffers.count());
891 }
892 
893 ///////////////////////////////////////////////////////////////////////////////
894 // SecondaryCommandBuffer
895 ////////////////////////////////////////////////////////////////////////////////
896 
Create(GrVkGpu * gpu,GrVkCommandPool * cmdPool)897 GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
898                                                                GrVkCommandPool* cmdPool) {
899     SkASSERT(cmdPool);
900     const VkCommandBufferAllocateInfo cmdInfo = {
901         VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
902         nullptr,                                          // pNext
903         cmdPool->vkCommandPool(),                         // commandPool
904         VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
905         1                                                 // bufferCount
906     };
907 
908     VkCommandBuffer cmdBuffer;
909     VkResult err;
910     GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
911     if (err) {
912         return nullptr;
913     }
914     return new GrVkSecondaryCommandBuffer(cmdBuffer, false);
915 }
916 
Create(VkCommandBuffer cmdBuffer)917 GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
918     return new GrVkSecondaryCommandBuffer(cmdBuffer, true);
919 }
920 
begin(GrVkGpu * gpu,const GrVkFramebuffer * framebuffer,const GrVkRenderPass * compatibleRenderPass)921 void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
922                                        const GrVkRenderPass* compatibleRenderPass) {
923     SkASSERT(!fIsActive);
924     SkASSERT(compatibleRenderPass);
925     fActiveRenderPass = compatibleRenderPass;
926 
927     if (!this->isWrapped()) {
928         VkCommandBufferInheritanceInfo inheritanceInfo;
929         memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
930         inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
931         inheritanceInfo.pNext = nullptr;
932         inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
933         inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
934         inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
935         inheritanceInfo.occlusionQueryEnable = false;
936         inheritanceInfo.queryFlags = 0;
937         inheritanceInfo.pipelineStatistics = 0;
938 
939         VkCommandBufferBeginInfo cmdBufferBeginInfo;
940         memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
941         cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
942         cmdBufferBeginInfo.pNext = nullptr;
943         cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
944                 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
945         cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
946 
947         GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
948     }
949     fIsActive = true;
950 }
951 
end(GrVkGpu * gpu)952 void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
953     SkASSERT(fIsActive);
954     if (!this->isWrapped()) {
955         GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
956     }
957     this->invalidateState();
958     fIsActive = false;
959     fHasWork = false;
960 }
961 
recycle(GrVkCommandPool * cmdPool)962 void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
963     if (this->isWrapped()) {
964         delete this;
965     } else {
966         cmdPool->recycleSecondaryCommandBuffer(this);
967     }
968 }
969 
970