1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 
8 #include "src/gpu/vk/GrVkCommandBuffer.h"
9 
10 #include "include/core/SkRect.h"
11 #include "src/gpu/vk/GrVkCommandPool.h"
12 #include "src/gpu/vk/GrVkFramebuffer.h"
13 #include "src/gpu/vk/GrVkGpu.h"
14 #include "src/gpu/vk/GrVkImage.h"
15 #include "src/gpu/vk/GrVkImageView.h"
16 #include "src/gpu/vk/GrVkIndexBuffer.h"
17 #include "src/gpu/vk/GrVkPipeline.h"
18 #include "src/gpu/vk/GrVkPipelineState.h"
19 #include "src/gpu/vk/GrVkPipelineState.h"
20 #include "src/gpu/vk/GrVkRenderPass.h"
21 #include "src/gpu/vk/GrVkRenderTarget.h"
22 #include "src/gpu/vk/GrVkTransferBuffer.h"
23 #include "src/gpu/vk/GrVkUtil.h"
24 #include "src/gpu/vk/GrVkVertexBuffer.h"
25 
invalidateState()26 void GrVkCommandBuffer::invalidateState() {
27     for (auto& boundInputBuffer : fBoundInputBuffers) {
28         boundInputBuffer = VK_NULL_HANDLE;
29     }
30     fBoundIndexBuffer = VK_NULL_HANDLE;
31 
32     memset(&fCachedViewport, 0, sizeof(VkViewport));
33     fCachedViewport.width = - 1.0f; // Viewport must have a width greater than 0
34 
35     memset(&fCachedScissor, 0, sizeof(VkRect2D));
36     fCachedScissor.offset.x = -1; // Scissor offset must be greater that 0 to be valid
37 
38     for (int i = 0; i < 4; ++i) {
39         fCachedBlendConstant[i] = -1.0;
40     }
41 }
42 
freeGPUData(GrVkGpu * gpu) const43 void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu) const {
44     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
45     SkASSERT(!fIsActive);
46     for (int i = 0; i < fTrackedResources.count(); ++i) {
47         fTrackedResources[i]->notifyRemovedFromCommandBuffer();
48         fTrackedResources[i]->unref(gpu);
49     }
50 
51     for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
52         fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
53         fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
54     }
55 
56     if (!this->isWrapped()) {
57         GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), fCmdPool->vkCommandPool(),
58                                                           1, &fCmdBuffer));
59     }
60 
61     this->onFreeGPUData(gpu);
62 }
63 
abandonGPUData() const64 void GrVkCommandBuffer::abandonGPUData() const {
65     SkDEBUGCODE(fResourcesReleased = true;)
66     for (int i = 0; i < fTrackedResources.count(); ++i) {
67         fTrackedResources[i]->notifyRemovedFromCommandBuffer();
68         fTrackedResources[i]->unrefAndAbandon();
69     }
70 
71     for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
72         fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
73         // We don't recycle resources when abandoning them.
74         fTrackedRecycledResources[i]->unrefAndAbandon();
75     }
76 
77     this->onAbandonGPUData();
78 }
79 
releaseResources(GrVkGpu * gpu)80 void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
81     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
82     SkDEBUGCODE(fResourcesReleased = true;)
83     SkASSERT(!fIsActive);
84     for (int i = 0; i < fTrackedResources.count(); ++i) {
85         fTrackedResources[i]->notifyRemovedFromCommandBuffer();
86         fTrackedResources[i]->unref(gpu);
87     }
88     for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
89         fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
90         fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
91     }
92 
93     if (++fNumResets > kNumRewindResetsBeforeFullReset) {
94         fTrackedResources.reset();
95         fTrackedRecycledResources.reset();
96         fTrackedResources.setReserve(kInitialTrackedResourcesCount);
97         fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
98         fNumResets = 0;
99     } else {
100         fTrackedResources.rewind();
101         fTrackedRecycledResources.rewind();
102     }
103 
104     this->invalidateState();
105 
106     this->onReleaseResources(gpu);
107 }
108 
109 ////////////////////////////////////////////////////////////////////////////////
110 // CommandBuffer commands
111 ////////////////////////////////////////////////////////////////////////////////
112 
pipelineBarrier(const GrVkGpu * gpu,const GrVkResource * resource,VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,bool byRegion,BarrierType barrierType,void * barrier)113 void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
114                                         const GrVkResource* resource,
115                                         VkPipelineStageFlags srcStageMask,
116                                         VkPipelineStageFlags dstStageMask,
117                                         bool byRegion,
118                                         BarrierType barrierType,
119                                         void* barrier) {
120     SkASSERT(!this->isWrapped());
121     SkASSERT(fIsActive);
122     // For images we can have barriers inside of render passes but they require us to add more
123     // support in subpasses which need self dependencies to have barriers inside them. Also, we can
124     // never have buffer barriers inside of a render pass. For now we will just assert that we are
125     // not in a render pass.
126     SkASSERT(!fActiveRenderPass);
127 
128     if (barrierType == kBufferMemory_BarrierType) {
129         const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
130         fBufferBarriers.push_back(*barrierPtr);
131     } else {
132         SkASSERT(barrierType == kImageMemory_BarrierType);
133         const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
134         // We need to check if we are adding a pipeline barrier that covers part of the same
135         // subresource range as a barrier that is already in current batch. If it does, then we must
136         // submit the first batch because the vulkan spec does not define a specific ordering for
137         // barriers submitted in the same batch.
138         // TODO: Look if we can gain anything by merging barriers together instead of submitting
139         // the old ones.
140         for (int i = 0; i < fImageBarriers.count(); ++i) {
141             VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
142             if (barrierPtr->image == currentBarrier.image) {
143                 const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
144                 const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
145                 SkASSERT(newRange.aspectMask == oldRange.aspectMask);
146                 SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
147                 SkASSERT(newRange.layerCount == oldRange.layerCount);
148                 uint32_t newStart = newRange.baseMipLevel;
149                 uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
150                 uint32_t oldStart = oldRange.baseMipLevel;
151                 uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
152                 if (SkTMax(newStart, oldStart) <= SkTMin(newEnd, oldEnd)) {
153                     this->submitPipelineBarriers(gpu);
154                     break;
155                 }
156             }
157         }
158         fImageBarriers.push_back(*barrierPtr);
159     }
160     fBarriersByRegion |= byRegion;
161 
162     fSrcStageMask = fSrcStageMask | srcStageMask;
163     fDstStageMask = fDstStageMask | dstStageMask;
164 
165     fHasWork = true;
166     if (resource) {
167         this->addResource(resource);
168     }
169 }
170 
submitPipelineBarriers(const GrVkGpu * gpu)171 void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
172     SkASSERT(fIsActive);
173 
174     // Currently we never submit a pipeline barrier without at least one memory barrier.
175     if (fBufferBarriers.count() || fImageBarriers.count()) {
176         // For images we can have barriers inside of render passes but they require us to add more
177         // support in subpasses which need self dependencies to have barriers inside them. Also, we
178         // can never have buffer barriers inside of a render pass. For now we will just assert that
179         // we are not in a render pass.
180         SkASSERT(!fActiveRenderPass);
181         SkASSERT(!this->isWrapped());
182         SkASSERT(fSrcStageMask && fDstStageMask);
183 
184         VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
185         GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
186                 fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
187                 fBufferBarriers.count(), fBufferBarriers.begin(),
188                 fImageBarriers.count(), fImageBarriers.begin()));
189         fBufferBarriers.reset();
190         fImageBarriers.reset();
191         fBarriersByRegion = false;
192         fSrcStageMask = 0;
193         fDstStageMask = 0;
194     }
195     SkASSERT(!fBufferBarriers.count());
196     SkASSERT(!fImageBarriers.count());
197     SkASSERT(!fBarriersByRegion);
198     SkASSERT(!fSrcStageMask);
199     SkASSERT(!fDstStageMask);
200 }
201 
202 
bindInputBuffer(GrVkGpu * gpu,uint32_t binding,const GrVkVertexBuffer * vbuffer)203 void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
204                                         const GrVkVertexBuffer* vbuffer) {
205     VkBuffer vkBuffer = vbuffer->buffer();
206     SkASSERT(VK_NULL_HANDLE != vkBuffer);
207     SkASSERT(binding < kMaxInputBuffers);
208     // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
209     // to know if we can skip binding or not.
210     if (vkBuffer != fBoundInputBuffers[binding]) {
211         VkDeviceSize offset = vbuffer->offset();
212         GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
213                                                             binding,
214                                                             1,
215                                                             &vkBuffer,
216                                                             &offset));
217         fBoundInputBuffers[binding] = vkBuffer;
218         this->addResource(vbuffer->resource());
219     }
220 }
221 
bindIndexBuffer(GrVkGpu * gpu,const GrVkIndexBuffer * ibuffer)222 void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer) {
223     VkBuffer vkBuffer = ibuffer->buffer();
224     SkASSERT(VK_NULL_HANDLE != vkBuffer);
225     // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
226     // to know if we can skip binding or not.
227     if (vkBuffer != fBoundIndexBuffer) {
228         GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
229                                                           vkBuffer,
230                                                           ibuffer->offset(),
231                                                           VK_INDEX_TYPE_UINT16));
232         fBoundIndexBuffer = vkBuffer;
233         this->addResource(ibuffer->resource());
234     }
235 }
236 
clearAttachments(const GrVkGpu * gpu,int numAttachments,const VkClearAttachment * attachments,int numRects,const VkClearRect * clearRects)237 void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
238                                          int numAttachments,
239                                          const VkClearAttachment* attachments,
240                                          int numRects,
241                                          const VkClearRect* clearRects) {
242     SkASSERT(fIsActive);
243     SkASSERT(fActiveRenderPass);
244     SkASSERT(numAttachments > 0);
245     SkASSERT(numRects > 0);
246 
247     this->addingWork(gpu);
248 
249 #ifdef SK_DEBUG
250     for (int i = 0; i < numAttachments; ++i) {
251         if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
252             uint32_t testIndex;
253             SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
254             SkASSERT(testIndex == attachments[i].colorAttachment);
255         }
256     }
257 #endif
258     GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
259                                                        numAttachments,
260                                                        attachments,
261                                                        numRects,
262                                                        clearRects));
263     if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
264         this->invalidateState();
265     }
266 }
267 
bindDescriptorSets(const GrVkGpu * gpu,GrVkPipelineState * pipelineState,VkPipelineLayout layout,uint32_t firstSet,uint32_t setCount,const VkDescriptorSet * descriptorSets,uint32_t dynamicOffsetCount,const uint32_t * dynamicOffsets)268 void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
269                                            GrVkPipelineState* pipelineState,
270                                            VkPipelineLayout layout,
271                                            uint32_t firstSet,
272                                            uint32_t setCount,
273                                            const VkDescriptorSet* descriptorSets,
274                                            uint32_t dynamicOffsetCount,
275                                            const uint32_t* dynamicOffsets) {
276     SkASSERT(fIsActive);
277     GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
278                                                          VK_PIPELINE_BIND_POINT_GRAPHICS,
279                                                          layout,
280                                                          firstSet,
281                                                          setCount,
282                                                          descriptorSets,
283                                                          dynamicOffsetCount,
284                                                          dynamicOffsets));
285 }
286 
bindPipeline(const GrVkGpu * gpu,const GrVkPipeline * pipeline)287 void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
288     SkASSERT(fIsActive);
289     GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
290                                                    VK_PIPELINE_BIND_POINT_GRAPHICS,
291                                                    pipeline->pipeline()));
292     this->addResource(pipeline);
293 }
294 
drawIndexed(const GrVkGpu * gpu,uint32_t indexCount,uint32_t instanceCount,uint32_t firstIndex,int32_t vertexOffset,uint32_t firstInstance)295 void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
296                                     uint32_t indexCount,
297                                     uint32_t instanceCount,
298                                     uint32_t firstIndex,
299                                     int32_t vertexOffset,
300                                     uint32_t firstInstance) {
301     SkASSERT(fIsActive);
302     SkASSERT(fActiveRenderPass);
303     this->addingWork(gpu);
304     GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
305                                                   indexCount,
306                                                   instanceCount,
307                                                   firstIndex,
308                                                   vertexOffset,
309                                                   firstInstance));
310 }
311 
draw(const GrVkGpu * gpu,uint32_t vertexCount,uint32_t instanceCount,uint32_t firstVertex,uint32_t firstInstance)312 void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
313                              uint32_t vertexCount,
314                              uint32_t instanceCount,
315                              uint32_t firstVertex,
316                              uint32_t firstInstance) {
317     SkASSERT(fIsActive);
318     SkASSERT(fActiveRenderPass);
319     this->addingWork(gpu);
320     GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
321                                            vertexCount,
322                                            instanceCount,
323                                            firstVertex,
324                                            firstInstance));
325 }
326 
setViewport(const GrVkGpu * gpu,uint32_t firstViewport,uint32_t viewportCount,const VkViewport * viewports)327 void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
328                                     uint32_t firstViewport,
329                                     uint32_t viewportCount,
330                                     const VkViewport* viewports) {
331     SkASSERT(fIsActive);
332     SkASSERT(1 == viewportCount);
333     if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
334         GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
335                                                       firstViewport,
336                                                       viewportCount,
337                                                       viewports));
338         fCachedViewport = viewports[0];
339     }
340 }
341 
setScissor(const GrVkGpu * gpu,uint32_t firstScissor,uint32_t scissorCount,const VkRect2D * scissors)342 void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
343                                    uint32_t firstScissor,
344                                    uint32_t scissorCount,
345                                    const VkRect2D* scissors) {
346     SkASSERT(fIsActive);
347     SkASSERT(1 == scissorCount);
348     if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
349         GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
350                                                      firstScissor,
351                                                      scissorCount,
352                                                      scissors));
353         fCachedScissor = scissors[0];
354     }
355 }
356 
setBlendConstants(const GrVkGpu * gpu,const float blendConstants[4])357 void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
358                                           const float blendConstants[4]) {
359     SkASSERT(fIsActive);
360     if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
361         GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
362         memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
363     }
364 }
365 
// Called before recording any real command: flushes batched pipeline barriers
// first (so they are ordered ahead of the new work) and marks the buffer as
// having recorded work.
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}
370 
371 ///////////////////////////////////////////////////////////////////////////////
372 // PrimaryCommandBuffer
373 ////////////////////////////////////////////////////////////////////////////////
// Destructor only validates state; resource cleanup happens via
// freeGPUData()/abandonGPUData()/releaseResources() before destruction.
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}
378 
Create(const GrVkGpu * gpu,GrVkCommandPool * cmdPool)379 GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
380                                                            GrVkCommandPool* cmdPool) {
381     const VkCommandBufferAllocateInfo cmdInfo = {
382         VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
383         nullptr,                                          // pNext
384         cmdPool->vkCommandPool(),                         // commandPool
385         VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
386         1                                                 // bufferCount
387     };
388 
389     VkCommandBuffer cmdBuffer;
390     VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
391                                                                          &cmdInfo,
392                                                                          &cmdBuffer));
393     if (err) {
394         return nullptr;
395     }
396     return new GrVkPrimaryCommandBuffer(cmdBuffer, cmdPool);
397 }
398 
begin(const GrVkGpu * gpu)399 void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
400     SkASSERT(!fIsActive);
401     VkCommandBufferBeginInfo cmdBufferBeginInfo;
402     memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
403     cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
404     cmdBufferBeginInfo.pNext = nullptr;
405     cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
406     cmdBufferBeginInfo.pInheritanceInfo = nullptr;
407 
408     GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
409                                                                &cmdBufferBeginInfo));
410     fIsActive = true;
411 }
412 
// Finishes recording: flushes any batched barriers (they must be recorded
// before vkEndCommandBuffer), ends the buffer, and resets cached state so the
// next begin() starts clean.
void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers(gpu);

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}
424 
beginRenderPass(const GrVkGpu * gpu,const GrVkRenderPass * renderPass,const VkClearValue clearValues[],const GrVkRenderTarget & target,const SkIRect & bounds,bool forSecondaryCB)425 void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
426                                                const GrVkRenderPass* renderPass,
427                                                const VkClearValue clearValues[],
428                                                const GrVkRenderTarget& target,
429                                                const SkIRect& bounds,
430                                                bool forSecondaryCB) {
431     SkASSERT(fIsActive);
432     SkASSERT(!fActiveRenderPass);
433     SkASSERT(renderPass->isCompatible(target));
434 
435     this->addingWork(gpu);
436 
437     VkRenderPassBeginInfo beginInfo;
438     VkRect2D renderArea;
439     renderArea.offset = { bounds.fLeft , bounds.fTop };
440     renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };
441 
442     memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
443     beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
444     beginInfo.pNext = nullptr;
445     beginInfo.renderPass = renderPass->vkRenderPass();
446     beginInfo.framebuffer = target.framebuffer()->framebuffer();
447     beginInfo.renderArea = renderArea;
448     beginInfo.clearValueCount = renderPass->clearValueCount();
449     beginInfo.pClearValues = clearValues;
450 
451     VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
452                                                 : VK_SUBPASS_CONTENTS_INLINE;
453 
454     GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
455     fActiveRenderPass = renderPass;
456     this->addResource(renderPass);
457     target.addResources(*this);
458 }
459 
// Ends the active render pass. addingWork() is called first to mark recorded
// work (no barriers can be pending inside a render pass).
void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}
467 
// Executes a finished secondary command buffer inside the active render pass
// and takes ownership of it (it is kept until this primary buffer's resources
// are released/recycled).
void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // if the command pools both were created from were created with the same queue family. However,
    // we currently always create them from the same pool.
    SkASSERT(buffer->commandPool() == fCmdPool);
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    // Take ownership so the secondary buffer outlives GPU execution.
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}
487 
submit_to_queue(const GrVkInterface * interface,VkQueue queue,VkFence fence,uint32_t waitCount,const VkSemaphore * waitSemaphores,const VkPipelineStageFlags * waitStages,uint32_t commandBufferCount,const VkCommandBuffer * commandBuffers,uint32_t signalCount,const VkSemaphore * signalSemaphores,GrProtected protectedContext)488 static void submit_to_queue(const GrVkInterface* interface,
489                             VkQueue queue,
490                             VkFence fence,
491                             uint32_t waitCount,
492                             const VkSemaphore* waitSemaphores,
493                             const VkPipelineStageFlags* waitStages,
494                             uint32_t commandBufferCount,
495                             const VkCommandBuffer* commandBuffers,
496                             uint32_t signalCount,
497                             const VkSemaphore* signalSemaphores,
498                             GrProtected protectedContext) {
499     VkProtectedSubmitInfo protectedSubmitInfo;
500     if (protectedContext == GrProtected::kYes) {
501         memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
502         protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
503         protectedSubmitInfo.pNext = nullptr;
504         protectedSubmitInfo.protectedSubmit = VK_TRUE;
505     }
506 
507     VkSubmitInfo submitInfo;
508     memset(&submitInfo, 0, sizeof(VkSubmitInfo));
509     submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
510     submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
511     submitInfo.waitSemaphoreCount = waitCount;
512     submitInfo.pWaitSemaphores = waitSemaphores;
513     submitInfo.pWaitDstStageMask = waitStages;
514     submitInfo.commandBufferCount = commandBufferCount;
515     submitInfo.pCommandBuffers = commandBuffers;
516     submitInfo.signalSemaphoreCount = signalCount;
517     submitInfo.pSignalSemaphores = signalSemaphores;
518     GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
519 }
520 
// Submits this finished command buffer to 'queue', waiting on / signaling the
// given semaphores and signaling fSubmitFence on completion. With
// kForce_SyncQueue the CPU blocks until the GPU finishes and the fence is then
// destroyed; otherwise finished() polls the fence later.
void GrVkPrimaryCommandBuffer::submitToQueue(
        const GrVkGpu* gpu,
        VkQueue queue,
        GrVkGpu::SyncQueue sync,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    // Lazily create the submit fence on first use; otherwise reset it so it
    // can signal again for this submission.
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                         &fSubmitFence));
        SkASSERT(!err);
    } else {
        GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
    }

    int signalCount = signalSemaphores.count();
    int waitCount = waitSemaphores.count();

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores so we can simply just submit it to the
        // queue with no worries.
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr, 1,
                        &fCmdBuffer, 0, nullptr,
                        gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        // Only include semaphores that still need to signal; each one included
        // is also ref'd (addResource) so it outlives GPU execution.
        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        // Likewise only wait on semaphores that still need waiting; all waits
        // block the whole pipeline (ALL_COMMANDS).
        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, vkWaitSems.count(),
                        vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                        vkSignalSems.count(), vkSignalSems.begin(),
                        gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
        // Record that the semaphore operations were enqueued so they are not
        // re-signaled/re-waited on a later submit.
        for (int i = 0; i < signalCount; ++i) {
            signalSemaphores[i]->markAsSignaled();
        }
        for (int i = 0; i < waitCount; ++i) {
            waitSemaphores[i]->markAsWaited();
        }
    }

    if (GrVkGpu::kForce_SyncQueue == sync) {
        // Synchronous path: block the CPU until the GPU signals the fence.
        err = GR_VK_CALL(gpu->vkInterface(),
                         WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            SkDebugf("Fence failed to signal: %d\n", err);
            SK_ABORT("failing");
        }
        SkASSERT(!err);

        // The GPU is done, so finished-callbacks can be dropped now.
        fFinishedProcs.reset();

        // Destroy the fence
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
    }
}
596 
finished(const GrVkGpu * gpu)597 bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) {
598     SkASSERT(!fIsActive);
599     if (VK_NULL_HANDLE == fSubmitFence) {
600         return true;
601     }
602 
603     VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
604     switch (err) {
605         case VK_SUCCESS:
606             return true;
607 
608         case VK_NOT_READY:
609             return false;
610 
611         default:
612             SkDebugf("Error getting fence status: %d\n", err);
613             SK_ABORT("failing");
614             break;
615     }
616 
617     return false;
618 }
619 
// Registers a ref-counted callback to be held until the GPU finishes this
// command buffer (the callback fires when its last ref is dropped).
void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}
623 
onReleaseResources(GrVkGpu * gpu)624 void GrVkPrimaryCommandBuffer::onReleaseResources(GrVkGpu* gpu) {
625     for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
626         fSecondaryCommandBuffers[i]->releaseResources(gpu);
627     }
628     fFinishedProcs.reset();
629 }
630 
recycleSecondaryCommandBuffers(GrVkGpu * gpu)631 void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkGpu* gpu) {
632     for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
633         SkASSERT(fSecondaryCommandBuffers[i]->commandPool() == fCmdPool);
634         fSecondaryCommandBuffers[i].release()->recycle(gpu);
635     }
636     fSecondaryCommandBuffers.reset();
637 }
638 
copyImage(const GrVkGpu * gpu,GrVkImage * srcImage,VkImageLayout srcLayout,GrVkImage * dstImage,VkImageLayout dstLayout,uint32_t copyRegionCount,const VkImageCopy * copyRegions)639 void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
640                                          GrVkImage* srcImage,
641                                          VkImageLayout srcLayout,
642                                          GrVkImage* dstImage,
643                                          VkImageLayout dstLayout,
644                                          uint32_t copyRegionCount,
645                                          const VkImageCopy* copyRegions) {
646     SkASSERT(fIsActive);
647     SkASSERT(!fActiveRenderPass);
648     this->addingWork(gpu);
649     this->addResource(srcImage->resource());
650     this->addResource(dstImage->resource());
651     GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
652                                                 srcImage->image(),
653                                                 srcLayout,
654                                                 dstImage->image(),
655                                                 dstLayout,
656                                                 copyRegionCount,
657                                                 copyRegions));
658 }
659 
blitImage(const GrVkGpu * gpu,const GrVkResource * srcResource,VkImage srcImage,VkImageLayout srcLayout,const GrVkResource * dstResource,VkImage dstImage,VkImageLayout dstLayout,uint32_t blitRegionCount,const VkImageBlit * blitRegions,VkFilter filter)660 void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
661                                          const GrVkResource* srcResource,
662                                          VkImage srcImage,
663                                          VkImageLayout srcLayout,
664                                          const GrVkResource* dstResource,
665                                          VkImage dstImage,
666                                          VkImageLayout dstLayout,
667                                          uint32_t blitRegionCount,
668                                          const VkImageBlit* blitRegions,
669                                          VkFilter filter) {
670     SkASSERT(fIsActive);
671     SkASSERT(!fActiveRenderPass);
672     this->addingWork(gpu);
673     this->addResource(srcResource);
674     this->addResource(dstResource);
675     GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
676                                                 srcImage,
677                                                 srcLayout,
678                                                 dstImage,
679                                                 dstLayout,
680                                                 blitRegionCount,
681                                                 blitRegions,
682                                                 filter));
683 }
684 
blitImage(const GrVkGpu * gpu,const GrVkImage & srcImage,const GrVkImage & dstImage,uint32_t blitRegionCount,const VkImageBlit * blitRegions,VkFilter filter)685 void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
686                                          const GrVkImage& srcImage,
687                                          const GrVkImage& dstImage,
688                                          uint32_t blitRegionCount,
689                                          const VkImageBlit* blitRegions,
690                                          VkFilter filter) {
691     this->blitImage(gpu,
692                     srcImage.resource(),
693                     srcImage.image(),
694                     srcImage.currentLayout(),
695                     dstImage.resource(),
696                     dstImage.image(),
697                     dstImage.currentLayout(),
698                     blitRegionCount,
699                     blitRegions,
700                     filter);
701 }
702 
703 
copyImageToBuffer(const GrVkGpu * gpu,GrVkImage * srcImage,VkImageLayout srcLayout,GrVkTransferBuffer * dstBuffer,uint32_t copyRegionCount,const VkBufferImageCopy * copyRegions)704 void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
705                                                  GrVkImage* srcImage,
706                                                  VkImageLayout srcLayout,
707                                                  GrVkTransferBuffer* dstBuffer,
708                                                  uint32_t copyRegionCount,
709                                                  const VkBufferImageCopy* copyRegions) {
710     SkASSERT(fIsActive);
711     SkASSERT(!fActiveRenderPass);
712     this->addingWork(gpu);
713     this->addResource(srcImage->resource());
714     this->addResource(dstBuffer->resource());
715     GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
716                                                         srcImage->image(),
717                                                         srcLayout,
718                                                         dstBuffer->buffer(),
719                                                         copyRegionCount,
720                                                         copyRegions));
721 }
722 
copyBufferToImage(const GrVkGpu * gpu,GrVkTransferBuffer * srcBuffer,GrVkImage * dstImage,VkImageLayout dstLayout,uint32_t copyRegionCount,const VkBufferImageCopy * copyRegions)723 void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
724                                                  GrVkTransferBuffer* srcBuffer,
725                                                  GrVkImage* dstImage,
726                                                  VkImageLayout dstLayout,
727                                                  uint32_t copyRegionCount,
728                                                  const VkBufferImageCopy* copyRegions) {
729     SkASSERT(fIsActive);
730     SkASSERT(!fActiveRenderPass);
731     this->addingWork(gpu);
732     this->addResource(srcBuffer->resource());
733     this->addResource(dstImage->resource());
734     GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
735                                                         srcBuffer->buffer(),
736                                                         dstImage->image(),
737                                                         dstLayout,
738                                                         copyRegionCount,
739                                                         copyRegions));
740 }
741 
742 
copyBuffer(GrVkGpu * gpu,GrVkBuffer * srcBuffer,GrVkBuffer * dstBuffer,uint32_t regionCount,const VkBufferCopy * regions)743 void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
744                                           GrVkBuffer* srcBuffer,
745                                           GrVkBuffer* dstBuffer,
746                                           uint32_t regionCount,
747                                           const VkBufferCopy* regions) {
748     SkASSERT(fIsActive);
749     SkASSERT(!fActiveRenderPass);
750     this->addingWork(gpu);
751 #ifdef SK_DEBUG
752     for (uint32_t i = 0; i < regionCount; ++i) {
753         const VkBufferCopy& region = regions[i];
754         SkASSERT(region.size > 0);
755         SkASSERT(region.srcOffset < srcBuffer->size());
756         SkASSERT(region.dstOffset < dstBuffer->size());
757         SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
758         SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
759     }
760 #endif
761     this->addResource(srcBuffer->resource());
762     this->addResource(dstBuffer->resource());
763     GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
764                                                  srcBuffer->buffer(),
765                                                  dstBuffer->buffer(),
766                                                  regionCount,
767                                                  regions));
768 }
769 
updateBuffer(GrVkGpu * gpu,GrVkBuffer * dstBuffer,VkDeviceSize dstOffset,VkDeviceSize dataSize,const void * data)770 void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
771                                             GrVkBuffer* dstBuffer,
772                                             VkDeviceSize dstOffset,
773                                             VkDeviceSize dataSize,
774                                             const void* data) {
775     SkASSERT(fIsActive);
776     SkASSERT(!fActiveRenderPass);
777     SkASSERT(0 == (dstOffset & 0x03));   // four byte aligned
778     // TODO: handle larger transfer sizes
779     SkASSERT(dataSize <= 65536);
780     SkASSERT(0 == (dataSize & 0x03));    // four byte aligned
781     this->addingWork(gpu);
782     this->addResource(dstBuffer->resource());
783     GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
784                                                    dstBuffer->buffer(),
785                                                    dstOffset,
786                                                    dataSize,
787                                                    (const uint32_t*) data));
788 }
789 
clearColorImage(const GrVkGpu * gpu,GrVkImage * image,const VkClearColorValue * color,uint32_t subRangeCount,const VkImageSubresourceRange * subRanges)790 void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
791                                                GrVkImage* image,
792                                                const VkClearColorValue* color,
793                                                uint32_t subRangeCount,
794                                                const VkImageSubresourceRange* subRanges) {
795     SkASSERT(fIsActive);
796     SkASSERT(!fActiveRenderPass);
797     this->addingWork(gpu);
798     this->addResource(image->resource());
799     GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
800                                                       image->image(),
801                                                       image->currentLayout(),
802                                                       color,
803                                                       subRangeCount,
804                                                       subRanges));
805 }
806 
clearDepthStencilImage(const GrVkGpu * gpu,GrVkImage * image,const VkClearDepthStencilValue * color,uint32_t subRangeCount,const VkImageSubresourceRange * subRanges)807 void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
808                                                       GrVkImage* image,
809                                                       const VkClearDepthStencilValue* color,
810                                                       uint32_t subRangeCount,
811                                                       const VkImageSubresourceRange* subRanges) {
812     SkASSERT(fIsActive);
813     SkASSERT(!fActiveRenderPass);
814     this->addingWork(gpu);
815     this->addResource(image->resource());
816     GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
817                                                              image->image(),
818                                                              image->currentLayout(),
819                                                              color,
820                                                              subRangeCount,
821                                                              subRanges));
822 }
823 
resolveImage(GrVkGpu * gpu,const GrVkImage & srcImage,const GrVkImage & dstImage,uint32_t regionCount,const VkImageResolve * regions)824 void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
825                                             const GrVkImage& srcImage,
826                                             const GrVkImage& dstImage,
827                                             uint32_t regionCount,
828                                             const VkImageResolve* regions) {
829     SkASSERT(fIsActive);
830     SkASSERT(!fActiveRenderPass);
831 
832     this->addingWork(gpu);
833     this->addResource(srcImage.resource());
834     this->addResource(dstImage.resource());
835 
836     GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
837                                                    srcImage.image(),
838                                                    srcImage.currentLayout(),
839                                                    dstImage.image(),
840                                                    dstImage.currentLayout(),
841                                                    regionCount,
842                                                    regions));
843 }
844 
onFreeGPUData(GrVkGpu * gpu) const845 void GrVkPrimaryCommandBuffer::onFreeGPUData(GrVkGpu* gpu) const {
846     SkASSERT(!fActiveRenderPass);
847     // Destroy the fence, if any
848     if (VK_NULL_HANDLE != fSubmitFence) {
849         GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
850     }
851     for (const auto& buffer : fSecondaryCommandBuffers) {
852         buffer->freeGPUData(gpu);
853     }
854 }
855 
onAbandonGPUData() const856 void GrVkPrimaryCommandBuffer::onAbandonGPUData() const {
857     SkASSERT(!fActiveRenderPass);
858     for (const auto& buffer : fSecondaryCommandBuffers) {
859         buffer->abandonGPUData();
860     }
861 }
862 
863 ///////////////////////////////////////////////////////////////////////////////
864 // SecondaryCommandBuffer
865 ////////////////////////////////////////////////////////////////////////////////
866 
Create(const GrVkGpu * gpu,GrVkCommandPool * cmdPool)867 GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
868                                                                GrVkCommandPool* cmdPool) {
869     SkASSERT(cmdPool);
870     const VkCommandBufferAllocateInfo cmdInfo = {
871         VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
872         nullptr,                                          // pNext
873         cmdPool->vkCommandPool(),                         // commandPool
874         VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
875         1                                                 // bufferCount
876     };
877 
878     VkCommandBuffer cmdBuffer;
879     VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
880                                                                          &cmdInfo,
881                                                                          &cmdBuffer));
882     if (err) {
883         return nullptr;
884     }
885     return new GrVkSecondaryCommandBuffer(cmdBuffer, cmdPool);
886 }
887 
Create(VkCommandBuffer cmdBuffer)888 GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
889     return new GrVkSecondaryCommandBuffer(cmdBuffer, nullptr);
890 }
891 
begin(const GrVkGpu * gpu,const GrVkFramebuffer * framebuffer,const GrVkRenderPass * compatibleRenderPass)892 void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
893                                        const GrVkRenderPass* compatibleRenderPass) {
894     SkASSERT(!fIsActive);
895     SkASSERT(compatibleRenderPass);
896     fActiveRenderPass = compatibleRenderPass;
897 
898     if (!this->isWrapped()) {
899         VkCommandBufferInheritanceInfo inheritanceInfo;
900         memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
901         inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
902         inheritanceInfo.pNext = nullptr;
903         inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
904         inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
905         inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
906         inheritanceInfo.occlusionQueryEnable = false;
907         inheritanceInfo.queryFlags = 0;
908         inheritanceInfo.pipelineStatistics = 0;
909 
910         VkCommandBufferBeginInfo cmdBufferBeginInfo;
911         memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
912         cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
913         cmdBufferBeginInfo.pNext = nullptr;
914         cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
915                 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
916         cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
917 
918         GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
919                                                                    &cmdBufferBeginInfo));
920     }
921     fIsActive = true;
922 }
923 
end(GrVkGpu * gpu)924 void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
925     SkASSERT(fIsActive);
926     if (!this->isWrapped()) {
927         GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
928     }
929     this->invalidateState();
930     fIsActive = false;
931     fHasWork = false;
932 }
933 
recycle(GrVkGpu * gpu)934 void GrVkSecondaryCommandBuffer::recycle(GrVkGpu* gpu) {
935     if (this->isWrapped()) {
936         this->freeGPUData(gpu);
937         delete this;
938     } else {
939         fCmdPool->recycleSecondaryCommandBuffer(this);
940     }
941 }
942 
943