/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrCommandBufferRef.h"
#include "src/gpu/GrManagedResource.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"

class GrVkBuffer;
class GrVkFramebuffer;
class GrVkImage;
class GrVkMeshBuffer;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;
class GrVkTransferBuffer;

class GrVkCommandBuffer {
public:
    virtual ~GrVkCommandBuffer() {}

    void invalidateState();

    ////////////////////////////////////////////////////////////////////////////
    // CommandBuffer commands
    ////////////////////////////////////////////////////////////////////////////
    enum BarrierType {
        kBufferMemory_BarrierType,
        kImageMemory_BarrierType
    };

    void pipelineBarrier(const GrVkGpu* gpu,
                         const GrManagedResource* resource,
                         VkPipelineStageFlags srcStageMask,
                         VkPipelineStageFlags dstStageMask,
                         bool byRegion,
                         BarrierType barrierType,
                         void* barrier);
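
    // The BarrierType tag tells pipelineBarrier() which Vulkan struct the untyped 'barrier'
    // pointer refers to (VkBufferMemoryBarrier or VkImageMemoryBarrier); barriers are collected
    // and flushed as a batch by submitPipelineBarriers(). A minimal usage sketch, assuming a
    // caller that already has a 'gpu', a tracked 'resource', and a VkImage 'image' to
    // transition (all hypothetical, not defined in this header):
    //
    //     VkImageMemoryBarrier barrier = {};
    //     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    //     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     barrier.image = image;
    //     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //
    //     cmdBuffer->pipelineBarrier(gpu, resource,
    //                                VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    //                                /*byRegion=*/false,
    //                                GrVkCommandBuffer::kImageMemory_BarrierType, &barrier);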

    void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, sk_sp<const GrBuffer> buffer);

    void bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer);

    void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            GrVkPipelineState*,
                            VkPipelineLayout layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);

    void setViewport(const GrVkGpu* gpu,
                     uint32_t firstViewport,
                     uint32_t viewportCount,
                     const VkViewport* viewports);

    void setScissor(const GrVkGpu* gpu,
                    uint32_t firstScissor,
                    uint32_t scissorCount,
                    const VkRect2D* scissors);

    void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);

    // Commands that only work inside of a render pass
    void clearAttachments(const GrVkGpu* gpu,
                          int numAttachments,
                          const VkClearAttachment* attachments,
                          int numRects,
                          const VkClearRect* clearRects);

    void drawIndexed(const GrVkGpu* gpu,
                     uint32_t indexCount,
                     uint32_t instanceCount,
                     uint32_t firstIndex,
                     int32_t vertexOffset,
                     uint32_t firstInstance);

    void draw(const GrVkGpu* gpu,
              uint32_t vertexCount,
              uint32_t instanceCount,
              uint32_t firstVertex,
              uint32_t firstInstance);

    void drawIndirect(const GrVkGpu* gpu,
                      const GrVkMeshBuffer* indirectBuffer,
                      VkDeviceSize offset,
                      uint32_t drawCount,
                      uint32_t stride);

    void drawIndexedIndirect(const GrVkGpu* gpu,
                             const GrVkMeshBuffer* indirectBuffer,
                             VkDeviceSize offset,
                             uint32_t drawCount,
                             uint32_t stride);

    // Add a ref-counted resource that will be tracked and released when this command buffer
    // finishes execution.
    void addResource(const GrManagedResource* resource) {
        SkASSERT(resource);
        resource->ref();
        resource->notifyQueuedForWorkOnGpu();
        fTrackedResources.append(1, &resource);
    }

    // Add a ref-counted resource that will be tracked and released when this command buffer
    // finishes execution. When it is released, it will signal that the resource can be recycled
    // for reuse.
    void addRecycledResource(const GrRecycledResource* resource) {
        resource->ref();
        resource->notifyQueuedForWorkOnGpu();
        fTrackedRecycledResources.append(1, &resource);
    }

    void addGrBuffer(sk_sp<const GrBuffer> buffer) {
        fTrackedGpuBuffers.push_back(std::move(buffer));
    }

    void addGrSurface(sk_sp<const GrSurface> surface) {
        fTrackedGpuSurfaces.push_back(std::move(surface));
    }

    void releaseResources();

    void freeGPUData(const GrGpu* gpu, VkCommandPool pool) const;

    bool hasWork() const { return fHasWork; }

protected:
    GrVkCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped = false)
            : fCmdBuffer(cmdBuffer)
            , fIsWrapped(isWrapped) {
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        this->invalidateState();
    }

    bool isWrapped() const { return fIsWrapped; }

    void addingWork(const GrVkGpu* gpu);

    void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);

    SkTDArray<const GrManagedResource*>   fTrackedResources;
    SkTDArray<const GrRecycledResource*>  fTrackedRecycledResources;
    SkSTArray<16, sk_sp<const GrBuffer>>  fTrackedGpuBuffers;
    SkSTArray<16, gr_cb<const GrSurface>> fTrackedGpuSurfaces;

    // Tracks whether we are in the middle of a command buffer begin/end call pair and thus can
    // add new commands to the buffer.
    bool                      fIsActive = false;
    bool                      fHasWork = false;

    // Stores a pointer to the currently active render pass (i.e. begin has been called but not
    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
    // the render pass.
    const GrVkRenderPass*     fActiveRenderPass = nullptr;

    VkCommandBuffer           fCmdBuffer;

private:
    static const int kInitialTrackedResourcesCount = 32;

    virtual void onReleaseResources() {}
    virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;

    static constexpr uint32_t kMaxInputBuffers = 2;

    VkBuffer fBoundInputBuffers[kMaxInputBuffers];
    VkBuffer fBoundIndexBuffer;

    // When resetting the command buffer, we remove the tracked resources from their arrays, and
    // we prefer to not free all the memory every time so usually we just rewind. However, to avoid
    // all arrays growing to the max size, after so many resets we'll do a full reset of the tracked
    // resource arrays.
    static const int kNumRewindResetsBeforeFullReset = 8;
    int              fNumResets = 0;

    // Cached values used for dynamic state updates
    VkViewport fCachedViewport;
    VkRect2D   fCachedScissor;
    float      fCachedBlendConstant[4];

    // Tracking of memory barriers so that we can submit them all in a batch together.
    SkSTArray<1, VkBufferMemoryBarrier> fBufferBarriers;
    SkSTArray<2, VkImageMemoryBarrier> fImageBarriers;
    bool fBarriersByRegion = false;
    VkPipelineStageFlags fSrcStageMask = 0;
    VkPipelineStageFlags fDstStageMask = 0;

    bool fIsWrapped;
};

class GrVkSecondaryCommandBuffer;

class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
public:
    ~GrVkPrimaryCommandBuffer() override;

    static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);

    void begin(GrVkGpu* gpu);
    void end(GrVkGpu* gpu);

    // Begins a render pass on this command buffer. The framebuffer from the GrVkRenderTarget
    // will be used in the render pass.
    bool beginRenderPass(GrVkGpu* gpu,
                         const GrVkRenderPass* renderPass,
                         const VkClearValue clearValues[],
                         GrVkRenderTarget* target,
                         const SkIRect& bounds,
                         bool forSecondaryCB);
    void endRenderPass(const GrVkGpu* gpu);

    // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
    // currently inside a render pass that is compatible with the one used to create the
    // SecondaryCommandBuffer.
    void executeCommands(const GrVkGpu* gpu,
                         std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);
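
    // A typical secondary-command-buffer flow combining the calls above looks roughly like the
    // sketch below; 'primary', 'secondary', 'gpu', 'renderPass', 'framebuffer', 'clearValues',
    // 'target', and 'bounds' are hypothetical caller-owned values, not defined by this header:
    //
    //     primary->beginRenderPass(gpu, renderPass, clearValues, target, bounds,
    //                              /*forSecondaryCB=*/true);
    //     secondary->begin(gpu, framebuffer, renderPass);  // must be a compatible render pass
    //     // ... record draws into 'secondary' ...
    //     secondary->end(gpu);
    //     primary->executeCommands(gpu, std::move(secondary));
    //     primary->endRenderPass(gpu);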

    // Commands that only work outside of a render pass
    void clearColorImage(const GrVkGpu* gpu,
                         GrVkImage* image,
                         const VkClearColorValue* color,
                         uint32_t subRangeCount,
                         const VkImageSubresourceRange* subRanges);

    void clearDepthStencilImage(const GrVkGpu* gpu,
                                GrVkImage* image,
                                const VkClearDepthStencilValue* color,
                                uint32_t subRangeCount,
                                const VkImageSubresourceRange* subRanges);

    void copyImage(const GrVkGpu* gpu,
                   GrVkImage* srcImage,
                   VkImageLayout srcLayout,
                   GrVkImage* dstImage,
                   VkImageLayout dstLayout,
                   uint32_t copyRegionCount,
                   const VkImageCopy* copyRegions);

    void blitImage(const GrVkGpu* gpu,
                   const GrManagedResource* srcResource,
                   VkImage srcImage,
                   VkImageLayout srcLayout,
                   const GrManagedResource* dstResource,
                   VkImage dstImage,
                   VkImageLayout dstLayout,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkImage& srcImage,
                   const GrVkImage& dstImage,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void copyImageToBuffer(const GrVkGpu* gpu,
                           GrVkImage* srcImage,
                           VkImageLayout srcLayout,
                           GrVkTransferBuffer* dstBuffer,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    void copyBufferToImage(const GrVkGpu* gpu,
                           GrVkTransferBuffer* srcBuffer,
                           GrVkImage* dstImage,
                           VkImageLayout dstLayout,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    void copyBuffer(GrVkGpu* gpu,
                    GrVkBuffer* srcBuffer,
                    GrVkBuffer* dstBuffer,
                    uint32_t regionCount,
                    const VkBufferCopy* regions);

    void updateBuffer(GrVkGpu* gpu,
                      GrVkBuffer* dstBuffer,
                      VkDeviceSize dstOffset,
                      VkDeviceSize dataSize,
                      const void* data);

    void resolveImage(GrVkGpu* gpu,
                      const GrVkImage& srcImage,
                      const GrVkImage& dstImage,
                      uint32_t regionCount,
                      const VkImageResolve* regions);

    bool submitToQueue(GrVkGpu* gpu, VkQueue queue,
                       SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
                       SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);

    void forceSync(GrVkGpu* gpu);

    bool finished(GrVkGpu* gpu);

    void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);

    void callFinishedProcs() {
        fFinishedProcs.reset();
    }

    void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);

private:
    explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
            : INHERITED(cmdBuffer)
            , fSubmitFence(VK_NULL_HANDLE) {}

    void onFreeGPUData(const GrVkGpu* gpu) const override;

    void onReleaseResources() override;

    SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>, true> fSecondaryCommandBuffers;
    VkFence                                                     fSubmitFence;
    SkTArray<sk_sp<GrRefCntedCallback>>                         fFinishedProcs;

    using INHERITED = GrVkCommandBuffer;
};

class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
public:
    static GrVkSecondaryCommandBuffer* Create(GrVkGpu* gpu, GrVkCommandPool* cmdPool);
    // Used for wrapping an external secondary command buffer.
    static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB);

    void begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
               const GrVkRenderPass* compatibleRenderPass);
    void end(GrVkGpu* gpu);

    void recycle(GrVkCommandPool* cmdPool);

    VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }

private:
    explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped)
            : INHERITED(cmdBuffer, isWrapped) {}

    void onFreeGPUData(const GrVkGpu* gpu) const override {}

    // Used for accessing fIsActive (on GrVkCommandBuffer)
    friend class GrVkPrimaryCommandBuffer;

    using INHERITED = GrVkCommandBuffer;
};

#endif