1 /*
2  * Copyright © 2019 Raspberry Pi Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "v3dv_private.h"
25 #include "util/u_pack_color.h"
26 #include "vk_util.h"
27 
28 void
29 v3dv_job_add_bo(struct v3dv_job *job, struct v3dv_bo *bo)
30 {
31    if (!bo)
32       return;
33 
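   /* job->bo_handle_mask works as a cheap filter: if this BO's handle bit is
    * not set in the mask, the BO cannot already be in the set, so we can skip
    * the hash set lookup below and add it directly.
    */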
34    if (job->bo_handle_mask & bo->handle_bit) {
35       if (_mesa_set_search(job->bos, bo))
36          return;
37    }
38 
39    _mesa_set_add(job->bos, bo);
40    job->bo_count++;
41    job->bo_handle_mask |= bo->handle_bit;
42 }
43 
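/* Like v3dv_job_add_bo() but skips the duplicate check, so it must only be
 * used when the caller knows the BO has not been added to the job yet (for
 * example, right after allocating a BO that is private to this job).
 */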
44 void
45 v3dv_job_add_bo_unchecked(struct v3dv_job *job, struct v3dv_bo *bo)
46 {
47    assert(bo);
48    _mesa_set_add(job->bos, bo);
49    job->bo_count++;
50    job->bo_handle_mask |= bo->handle_bit;
51 }
52 
53 static void
54 cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
55                 struct v3dv_device *device)
56 {
57    /* Do not reset the base object! If we are calling this from a command
58     * buffer reset that would reset the loader's dispatch table for the
59     * command buffer, and any other relevant info from vk_object_base.
60     */
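   /* Note this relies on the vk_command_buffer base being the first field of
    * struct v3dv_cmd_buffer, so the driver-private state is everything that
    * follows it:
    *
    *    [ vk_command_buffer (preserved) | v3dv driver state (zeroed) ]
    */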
61    const uint32_t base_size = sizeof(struct vk_command_buffer);
62    uint8_t *cmd_buffer_driver_start = ((uint8_t *) cmd_buffer) + base_size;
63    memset(cmd_buffer_driver_start, 0, sizeof(*cmd_buffer) - base_size);
64 
65    cmd_buffer->device = device;
66 
67    list_inithead(&cmd_buffer->private_objs);
68    list_inithead(&cmd_buffer->jobs);
69    list_inithead(&cmd_buffer->list_link);
70 
71    cmd_buffer->state.subpass_idx = -1;
72    cmd_buffer->state.meta.subpass_idx = -1;
73 
74    cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_INITIALIZED;
75 }
76 
77 static void cmd_buffer_destroy(struct vk_command_buffer *cmd_buffer);
78 
79 static VkResult
80 cmd_buffer_create(struct v3dv_device *device,
81                   struct vk_command_pool *pool,
82                   VkCommandBufferLevel level,
83                   VkCommandBuffer *pCommandBuffer)
84 {
85    struct v3dv_cmd_buffer *cmd_buffer;
86    cmd_buffer = vk_zalloc(&pool->alloc,
87                           sizeof(*cmd_buffer),
88                           8,
89                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
90    if (cmd_buffer == NULL)
91       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
92 
93    VkResult result;
94    result = vk_command_buffer_init(&cmd_buffer->vk, pool, level);
95    if (result != VK_SUCCESS) {
96       vk_free(&pool->alloc, cmd_buffer);
97       return result;
98    }
99 
100    cmd_buffer->vk.destroy = cmd_buffer_destroy;
101    cmd_buffer_init(cmd_buffer, device);
102 
103    *pCommandBuffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
104 
105    return VK_SUCCESS;
106 }
107 
108 static void
109 job_destroy_gpu_cl_resources(struct v3dv_job *job)
110 {
111    assert(job->type == V3DV_JOB_TYPE_GPU_CL ||
112           job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
113 
114    v3dv_cl_destroy(&job->bcl);
115    v3dv_cl_destroy(&job->rcl);
116    v3dv_cl_destroy(&job->indirect);
117 
118    /* Since we don't ref BOs when we add them to the command buffer, don't
119     * unref them here either. BOs will be freed when their corresponding API
120     * objects are destroyed.
121     */
122    _mesa_set_destroy(job->bos, NULL);
123 
124    v3dv_bo_free(job->device, job->tile_alloc);
125    v3dv_bo_free(job->device, job->tile_state);
126 }
127 
128 static void
129 job_destroy_cloned_gpu_cl_resources(struct v3dv_job *job)
130 {
131    assert(job->type == V3DV_JOB_TYPE_GPU_CL);
132 
133    list_for_each_entry_safe(struct v3dv_bo, bo, &job->bcl.bo_list, list_link) {
134       list_del(&bo->list_link);
135       vk_free(&job->device->vk.alloc, bo);
136    }
137 
138    list_for_each_entry_safe(struct v3dv_bo, bo, &job->rcl.bo_list, list_link) {
139       list_del(&bo->list_link);
140       vk_free(&job->device->vk.alloc, bo);
141    }
142 
143    list_for_each_entry_safe(struct v3dv_bo, bo, &job->indirect.bo_list, list_link) {
144       list_del(&bo->list_link);
145       vk_free(&job->device->vk.alloc, bo);
146    }
147 }
148 
149 static void
150 job_destroy_gpu_csd_resources(struct v3dv_job *job)
151 {
152    assert(job->type == V3DV_JOB_TYPE_GPU_CSD);
153    assert(job->cmd_buffer);
154 
155    v3dv_cl_destroy(&job->indirect);
156 
157    _mesa_set_destroy(job->bos, NULL);
158 
159    if (job->csd.shared_memory)
160       v3dv_bo_free(job->device, job->csd.shared_memory);
161 }
162 
163 static void
164 job_destroy_cpu_wait_events_resources(struct v3dv_job *job)
165 {
166    assert(job->type == V3DV_JOB_TYPE_CPU_WAIT_EVENTS);
167    assert(job->cmd_buffer);
168    vk_free(&job->cmd_buffer->device->vk.alloc, job->cpu.event_wait.events);
169 }
170 
171 static void
172 job_destroy_cpu_csd_indirect_resources(struct v3dv_job *job)
173 {
174    assert(job->type == V3DV_JOB_TYPE_CPU_CSD_INDIRECT);
175    assert(job->cmd_buffer);
176    v3dv_job_destroy(job->cpu.csd_indirect.csd_job);
177 }
178 
179 void
180 v3dv_job_destroy(struct v3dv_job *job)
181 {
182    assert(job);
183 
184    list_del(&job->list_link);
185 
186    /* Cloned jobs don't make deep copies of the original jobs, so they don't
187     * own any of their resources. However, they do allocate clones of BO
188     * structs, so make sure we free those.
189     */
190    if (!job->is_clone) {
191       switch (job->type) {
192       case V3DV_JOB_TYPE_GPU_CL:
193       case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
194          job_destroy_gpu_cl_resources(job);
195          break;
196       case V3DV_JOB_TYPE_GPU_CSD:
197          job_destroy_gpu_csd_resources(job);
198          break;
199       case V3DV_JOB_TYPE_CPU_WAIT_EVENTS:
200          job_destroy_cpu_wait_events_resources(job);
201          break;
202       case V3DV_JOB_TYPE_CPU_CSD_INDIRECT:
203          job_destroy_cpu_csd_indirect_resources(job);
204          break;
205       default:
206          break;
207       }
208    } else {
209       /* Cloned jobs */
210       if (job->type == V3DV_JOB_TYPE_GPU_CL)
211          job_destroy_cloned_gpu_cl_resources(job);
212    }
213 
214    vk_free(&job->device->vk.alloc, job);
215 }
216 
217 void
218 v3dv_cmd_buffer_add_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
219                                 uint64_t obj,
220                                 v3dv_cmd_buffer_private_obj_destroy_cb destroy_cb)
221 {
222    struct v3dv_cmd_buffer_private_obj *pobj =
223       vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(*pobj), 8,
224                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
225    if (!pobj) {
226       v3dv_flag_oom(cmd_buffer, NULL);
227       return;
228    }
229 
230    pobj->obj = obj;
231    pobj->destroy_cb = destroy_cb;
232 
233    list_addtail(&pobj->list_link, &cmd_buffer->private_objs);
234 }
235 
236 static void
237 cmd_buffer_destroy_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
238                                struct v3dv_cmd_buffer_private_obj *pobj)
239 {
240    assert(pobj && pobj->obj && pobj->destroy_cb);
241    pobj->destroy_cb(v3dv_device_to_handle(cmd_buffer->device),
242                     pobj->obj,
243                     &cmd_buffer->device->vk.alloc);
244    list_del(&pobj->list_link);
245    vk_free(&cmd_buffer->device->vk.alloc, pobj);
246 }
247 
248 static void
249 cmd_buffer_free_resources(struct v3dv_cmd_buffer *cmd_buffer)
250 {
251    list_for_each_entry_safe(struct v3dv_job, job,
252                             &cmd_buffer->jobs, list_link) {
253       v3dv_job_destroy(job);
254    }
255 
256    if (cmd_buffer->state.job)
257       v3dv_job_destroy(cmd_buffer->state.job);
258 
259    if (cmd_buffer->state.attachments)
260       vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.attachments);
261 
262    if (cmd_buffer->state.query.end.alloc_count > 0)
263       vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.query.end.states);
264 
265    if (cmd_buffer->push_constants_resource.bo)
266       v3dv_bo_free(cmd_buffer->device, cmd_buffer->push_constants_resource.bo);
267 
268    list_for_each_entry_safe(struct v3dv_cmd_buffer_private_obj, pobj,
269                             &cmd_buffer->private_objs, list_link) {
270       cmd_buffer_destroy_private_obj(cmd_buffer, pobj);
271    }
272 
273    if (cmd_buffer->state.meta.attachments) {
274       assert(cmd_buffer->state.meta.attachment_alloc_count > 0);
275       vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.meta.attachments);
276    }
277 }
278 
279 static void
280 cmd_buffer_destroy(struct vk_command_buffer *vk_cmd_buffer)
281 {
282    struct v3dv_cmd_buffer *cmd_buffer =
283       container_of(vk_cmd_buffer, struct v3dv_cmd_buffer, vk);
284 
285    cmd_buffer_free_resources(cmd_buffer);
286    vk_command_buffer_finish(&cmd_buffer->vk);
287    vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer);
288 }
289 
290 static bool
291 attachment_list_is_subset(struct v3dv_subpass_attachment *l1, uint32_t l1_count,
292                           struct v3dv_subpass_attachment *l2, uint32_t l2_count)
293 {
294    for (uint32_t i = 0; i < l1_count; i++) {
295       uint32_t attachment_idx = l1[i].attachment;
296       if (attachment_idx == VK_ATTACHMENT_UNUSED)
297          continue;
298 
299       uint32_t j;
300       for (j = 0; j < l2_count; j++) {
301          if (l2[j].attachment == attachment_idx)
302             break;
303       }
304       if (j == l2_count)
305          return false;
306    }
307 
308    return true;
309 }
310 
311 static bool
312 cmd_buffer_can_merge_subpass(struct v3dv_cmd_buffer *cmd_buffer,
313                              uint32_t subpass_idx)
314 {
315    const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
316    assert(state->pass);
317 
318    const struct v3dv_physical_device *physical_device =
319       &cmd_buffer->device->instance->physicalDevice;
320 
321    if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
322       return false;
323 
324    if (!cmd_buffer->state.job)
325       return false;
326 
327    if (cmd_buffer->state.job->always_flush)
328       return false;
329 
330    if (!physical_device->options.merge_jobs)
331       return false;
332 
333    /* Each render pass starts a new job */
334    if (subpass_idx == 0)
335       return false;
336 
337    /* Two subpasses can be merged in the same job if we can emit a single RCL
338     * for them (since the RCL includes the END_OF_RENDERING command that
339     * triggers the "render job finished" interrupt). We can do this so long
340     * as both subpasses render against the same attachments.
341     */
342    assert(state->subpass_idx == subpass_idx - 1);
343    struct v3dv_subpass *prev_subpass = &state->pass->subpasses[state->subpass_idx];
344    struct v3dv_subpass *subpass = &state->pass->subpasses[subpass_idx];
345 
346    /* Don't merge if the subpasses have different view masks, since in that
347     * case the framebuffer setup is different and we need to emit different
348     * RCLs.
349     */
350    if (subpass->view_mask != prev_subpass->view_mask)
351       return false;
352 
353    /* Because the list of subpass attachments can include VK_ATTACHMENT_UNUSED,
354     * we need to check that for each subpass all its used attachments are
355     * used by the other subpass.
356     */
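   /* For example, if the previous subpass uses color attachments
    * { 0, VK_ATTACHMENT_UNUSED } and this one uses { 0, 1 }, the first list is
    * a subset of the second but not the other way around, so the subpasses are
    * not compatible. That is why the check below runs in both directions.
    */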
357    bool compatible =
358       attachment_list_is_subset(prev_subpass->color_attachments,
359                                 prev_subpass->color_count,
360                                 subpass->color_attachments,
361                                 subpass->color_count);
362    if (!compatible)
363       return false;
364 
365    compatible =
366       attachment_list_is_subset(subpass->color_attachments,
367                                 subpass->color_count,
368                                 prev_subpass->color_attachments,
369                                 prev_subpass->color_count);
370    if (!compatible)
371       return false;
372 
373    if (subpass->ds_attachment.attachment !=
374        prev_subpass->ds_attachment.attachment)
375       return false;
376 
377    /* FIXME: Since some attachment formats can't be resolved using the TLB we
378     * need to emit separate resolve jobs for them and that would not be
379     * compatible with subpass merges. We could fix that by testing if any of
380     * the attachments to resolve doesn't support TLB resolves.
381     */
382    if (prev_subpass->resolve_attachments || subpass->resolve_attachments ||
383        prev_subpass->resolve_depth || prev_subpass->resolve_stencil ||
384        subpass->resolve_depth || subpass->resolve_stencil) {
385       return false;
386    }
387 
388    return true;
389 }
390 
391 /**
392  * Computes and sets the job frame tiling information required to set up frame
393  * binning and rendering.
394  */
395 static struct v3dv_frame_tiling *
396 job_compute_frame_tiling(struct v3dv_job *job,
397                          uint32_t width,
398                          uint32_t height,
399                          uint32_t layers,
400                          uint32_t render_target_count,
401                          uint8_t max_internal_bpp,
402                          bool msaa)
403 {
404    assert(job);
405    struct v3dv_frame_tiling *tiling = &job->frame_tiling;
406 
407    tiling->width = width;
408    tiling->height = height;
409    tiling->layers = layers;
410    tiling->render_target_count = render_target_count;
411    tiling->msaa = msaa;
412    tiling->internal_bpp = max_internal_bpp;
413 
414    /* We can use double-buffer when MSAA is disabled to reduce tile store
415     * overhead.
416     *
417     * FIXME: if we are emitting any tile loads the hardware will serialize
418     * loads and stores across tiles, effectively disabling double buffering,
419     * so we would want to check for that and not enable it in that case to
420     * avoid reducing the tile size.
421     */
422    tiling->double_buffer =
423       unlikely(V3D_DEBUG & V3D_DEBUG_DOUBLE_BUFFER) && !msaa;
424 
425    assert(!tiling->msaa || !tiling->double_buffer);
426 
427    v3d_choose_tile_size(render_target_count, max_internal_bpp,
428                         tiling->msaa, tiling->double_buffer,
429                         &tiling->tile_width, &tiling->tile_height);
430 
431    tiling->draw_tiles_x = DIV_ROUND_UP(width, tiling->tile_width);
432    tiling->draw_tiles_y = DIV_ROUND_UP(height, tiling->tile_height);
433 
434    /* Size up our supertiles until we get under the limit */
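   /* For example, assuming 64x64 tiles, a single-layer 1920x1080 frame has
    * 30x17 draw tiles, i.e. 510 supertiles of 1x1 tiles, which is over the
    * limit. The loop below first grows the supertile height (1x2 -> 30x9 = 270
    * supertiles, still too many) and then the width (2x2 -> 15x9 = 135), which
    * gets us under the 256 supertile limit.
    */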
435    const uint32_t max_supertiles = 256;
436    tiling->supertile_width = 1;
437    tiling->supertile_height = 1;
438    for (;;) {
439       tiling->frame_width_in_supertiles =
440          DIV_ROUND_UP(tiling->draw_tiles_x, tiling->supertile_width);
441       tiling->frame_height_in_supertiles =
442          DIV_ROUND_UP(tiling->draw_tiles_y, tiling->supertile_height);
443       const uint32_t num_supertiles = tiling->frame_width_in_supertiles *
444                                       tiling->frame_height_in_supertiles;
445       if (num_supertiles < max_supertiles)
446          break;
447 
448       if (tiling->supertile_width < tiling->supertile_height)
449          tiling->supertile_width++;
450       else
451          tiling->supertile_height++;
452    }
453 
454    return tiling;
455 }
456 
457 void
458 v3dv_job_start_frame(struct v3dv_job *job,
459                      uint32_t width,
460                      uint32_t height,
461                      uint32_t layers,
462                      bool allocate_tile_state_for_all_layers,
463                      uint32_t render_target_count,
464                      uint8_t max_internal_bpp,
465                      bool msaa)
466 {
467    assert(job);
468 
469    /* Start by computing frame tiling spec for this job */
470    const struct v3dv_frame_tiling *tiling =
471       job_compute_frame_tiling(job,
472                                width, height, layers,
473                                render_target_count, max_internal_bpp, msaa);
474 
475    v3dv_cl_ensure_space_with_branch(&job->bcl, 256);
476    v3dv_return_if_oom(NULL, job);
477 
478    /* We only need to allocate tile state for all layers if the binner
479     * writes primitives to layers other than the first. This can only be
480     * done using layered rendering (writing gl_Layer from a geometry shader),
481     * so for other cases of multilayered framebuffers (typically with
482     * meta copy/clear operations) that won't use layered rendering, we only
483     * need one layer's worth of tile state for the binner.
484     */
485    if (!allocate_tile_state_for_all_layers)
486       layers = 1;
487 
488    /* The PTB will request the tile alloc initial size per tile at start
489     * of tile binning.
490     */
491    uint32_t tile_alloc_size = 64 * tiling->layers *
492                               tiling->draw_tiles_x *
493                               tiling->draw_tiles_y;
494 
495    /* The PTB allocates in aligned 4k chunks after the initial setup. */
496    tile_alloc_size = align(tile_alloc_size, 4096);
497 
498    /* Include the first two chunk allocations that the PTB does so that
499     * we definitely clear the OOM condition before triggering one (the HW
500     * won't trigger OOM during the first allocations).
501     */
502    tile_alloc_size += 8192;
503 
504    /* For performance, allocate some extra initial memory after the PTB's
505     * minimal allocations, so that we hopefully don't have to block the
506     * GPU on the kernel handling an OOM signal.
507     */
508    tile_alloc_size += 512 * 1024;
509 
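   /* Continuing the example above (single layer, 30x17 tiles at 64 bytes per
    * tile): 64 * 510 = 32640 bytes, aligned up to 32768, plus 8192 for the
    * first two chunks and the 512 KB of extra slack, i.e. 565248 bytes total.
    */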
510    job->tile_alloc = v3dv_bo_alloc(job->device, tile_alloc_size,
511                                    "tile_alloc", true);
512    if (!job->tile_alloc) {
513       v3dv_flag_oom(NULL, job);
514       return;
515    }
516 
517    v3dv_job_add_bo_unchecked(job, job->tile_alloc);
518 
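   /* The TSDA (Tile State Data Array) needs 256 bytes per tile and layer; for
    * the single-layer 30x17-tile example above that is 510 * 256 = 130560
    * bytes.
    */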
519    const uint32_t tsda_per_tile_size = 256;
520    const uint32_t tile_state_size = tiling->layers *
521                                     tiling->draw_tiles_x *
522                                     tiling->draw_tiles_y *
523                                     tsda_per_tile_size;
524    job->tile_state = v3dv_bo_alloc(job->device, tile_state_size, "TSDA", true);
525    if (!job->tile_state) {
526       v3dv_flag_oom(NULL, job);
527       return;
528    }
529 
530    v3dv_job_add_bo_unchecked(job, job->tile_state);
531 
532    v3dv_X(job->device, job_emit_binning_prolog)(job, tiling, layers);
533 
534    job->ez_state = V3D_EZ_UNDECIDED;
535    job->first_ez_state = V3D_EZ_UNDECIDED;
536 }
537 
538 static void
539 cmd_buffer_end_render_pass_frame(struct v3dv_cmd_buffer *cmd_buffer)
540 {
541    assert(cmd_buffer->state.job);
542 
543    /* Typically, we have a single job for each subpass and we emit the job's RCL
544     * here when we are ending the frame for the subpass. However, some commands
545     * such as vkCmdClearAttachments need to run in their own separate job and
546     * they emit their own RCL even if they execute inside a subpass. In this
547     * scenario, we don't want to emit the subpass RCL when we end the frame for
548     * those jobs, so we only emit the subpass RCL if the job has not recorded
549     * any RCL commands of its own.
550     */
551    if (v3dv_cl_offset(&cmd_buffer->state.job->rcl) == 0)
552       v3dv_X(cmd_buffer->device, cmd_buffer_emit_render_pass_rcl)(cmd_buffer);
553 
554    v3dv_X(cmd_buffer->device, job_emit_binning_flush)(cmd_buffer->state.job);
555 }
556 
557 struct v3dv_job *
558 v3dv_cmd_buffer_create_cpu_job(struct v3dv_device *device,
559                                enum v3dv_job_type type,
560                                struct v3dv_cmd_buffer *cmd_buffer,
561                                uint32_t subpass_idx)
562 {
563    struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
564                                     sizeof(struct v3dv_job), 8,
565                                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
566    if (!job) {
567       v3dv_flag_oom(cmd_buffer, NULL);
568       return NULL;
569    }
570 
571    v3dv_job_init(job, type, device, cmd_buffer, subpass_idx);
572    return job;
573 }
574 
575 static void
576 cmd_buffer_add_cpu_jobs_for_pending_state(struct v3dv_cmd_buffer *cmd_buffer)
577 {
578    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
579 
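   /* Emit a CPU job for each query ended while recording the GPU job that
    * just finished. These CPU jobs run after the GPU job completes, so the
    * query end is only processed once the work it covers has executed.
    */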
580    if (state->query.end.used_count > 0) {
581       const uint32_t query_count = state->query.end.used_count;
582       for (uint32_t i = 0; i < query_count; i++) {
583          assert(i < state->query.end.used_count);
584          struct v3dv_job *job =
585             v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
586                                            V3DV_JOB_TYPE_CPU_END_QUERY,
587                                            cmd_buffer, -1);
588          v3dv_return_if_oom(cmd_buffer, NULL);
589 
590          job->cpu.query_end = state->query.end.states[i];
591          list_addtail(&job->list_link, &cmd_buffer->jobs);
592       }
593    }
594 }
595 
596 void
597 v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
598 {
599    struct v3dv_job *job = cmd_buffer->state.job;
600    if (!job)
601       return;
602 
603    if (cmd_buffer->state.oom) {
604       v3dv_job_destroy(job);
605       cmd_buffer->state.job = NULL;
606       return;
607    }
608 
609    /* If we have created a job for a command buffer then we should have
610     * recorded something into it: if the job was started in a render pass, it
611     * should at least have the start frame commands; otherwise, it should have
612     * a transfer command. The only exception is secondary command buffers
613     * inside a render pass.
614     */
615    assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
616           v3dv_cl_offset(&job->bcl) > 0);
617 
618    /* When we merge multiple subpasses into the same job we must only emit one
619     * RCL, so we do that here, once we have decided to finish the job.
620     * Any rendering that happens outside a render pass is never merged, so
621     * the RCL should have been emitted by the time we got here.
622     */
623    assert(v3dv_cl_offset(&job->rcl) != 0 || cmd_buffer->state.pass);
624 
625    /* If we are finishing a job inside a render pass we have two scenarios:
626     *
627     * 1. It is a regular CL, in which case we will submit the job to the GPU,
628     *    so we may need to generate an RCL and add a binning flush.
629     *
630     * 2. It is a partial CL recorded in a secondary command buffer, in which
631     *    case we are not submitting it directly to the GPU but rather branch to
632     *    it from a primary command buffer. In this case we just want to end
633     *    the BCL with a RETURN_FROM_SUB_LIST and the RCL and binning flush
634     *    will be the primary job that branches to this CL.
635     */
636    if (cmd_buffer->state.pass) {
637       if (job->type == V3DV_JOB_TYPE_GPU_CL) {
638          cmd_buffer_end_render_pass_frame(cmd_buffer);
639       } else {
640          assert(job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
641          v3dv_X(cmd_buffer->device, cmd_buffer_end_render_pass_secondary)(cmd_buffer);
642       }
643    }
644 
645    list_addtail(&job->list_link, &cmd_buffer->jobs);
646    cmd_buffer->state.job = NULL;
647 
648    /* If we have recorded any state with this last GPU job that requires to
649     * emit CPU jobs after the job is completed, add them now. The only
650     * exception is secondary command buffers inside a render pass, because in
651     * that case we want to defer this until we finish recording the primary
652     * job into which we execute the secondary.
653     */
654    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
655        !cmd_buffer->state.pass) {
656       cmd_buffer_add_cpu_jobs_for_pending_state(cmd_buffer);
657    }
658 }
659 
660 bool
661 v3dv_job_type_is_gpu(struct v3dv_job *job)
662 {
663    switch (job->type) {
664    case V3DV_JOB_TYPE_GPU_CL:
665    case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
666    case V3DV_JOB_TYPE_GPU_TFU:
667    case V3DV_JOB_TYPE_GPU_CSD:
668       return true;
669    default:
670       return false;
671    }
672 }
673 
674 static void
675 cmd_buffer_serialize_job_if_needed(struct v3dv_cmd_buffer *cmd_buffer,
676                                    struct v3dv_job *job)
677 {
678    assert(cmd_buffer && job);
679 
680    if (!cmd_buffer->state.has_barrier)
681       return;
682 
683    /* Serialization only affects GPU jobs, CPU jobs are always automatically
684     * serialized.
685     */
686    if (!v3dv_job_type_is_gpu(job))
687       return;
688 
689    job->serialize = true;
690    if (cmd_buffer->state.has_bcl_barrier &&
691        (job->type == V3DV_JOB_TYPE_GPU_CL ||
692         job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY)) {
693       job->needs_bcl_sync = true;
694    }
695 
696    cmd_buffer->state.has_barrier = false;
697    cmd_buffer->state.has_bcl_barrier = false;
698 }
699 
700 void
701 v3dv_job_init(struct v3dv_job *job,
702               enum v3dv_job_type type,
703               struct v3dv_device *device,
704               struct v3dv_cmd_buffer *cmd_buffer,
705               int32_t subpass_idx)
706 {
707    assert(job);
708 
709    /* Make sure we haven't made this new job current before calling here */
710    assert(!cmd_buffer || cmd_buffer->state.job != job);
711 
712    job->type = type;
713 
714    job->device = device;
715    job->cmd_buffer = cmd_buffer;
716 
717    list_inithead(&job->list_link);
718 
719    if (type == V3DV_JOB_TYPE_GPU_CL ||
720        type == V3DV_JOB_TYPE_GPU_CL_SECONDARY ||
721        type == V3DV_JOB_TYPE_GPU_CSD) {
722       job->bos =
723          _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
724       job->bo_count = 0;
725 
726       v3dv_cl_init(job, &job->indirect);
727 
728       if (unlikely(V3D_DEBUG & V3D_DEBUG_ALWAYS_FLUSH))
729          job->always_flush = true;
730    }
731 
732    if (type == V3DV_JOB_TYPE_GPU_CL ||
733        type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
734       v3dv_cl_init(job, &job->bcl);
735       v3dv_cl_init(job, &job->rcl);
736    }
737 
738    if (cmd_buffer) {
739       /* Flag all state as dirty. Generally, we need to re-emit state for each
740        * new job.
741        *
742        * FIXME: there may be some exceptions, in which case we could skip some
743        * bits.
744        */
745       cmd_buffer->state.dirty = ~0;
746       cmd_buffer->state.dirty_descriptor_stages = ~0;
747 
748       /* Honor inheritance of occlusion queries in secondaries if requested */
749       if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
750           cmd_buffer->state.inheritance.occlusion_query_enable) {
751          cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
752       }
753 
754       /* Keep track of the first subpass that we are recording in this new job.
755        * We will use this when we emit the RCL to decide how to emit our loads
756        * and stores.
757        */
758       if (cmd_buffer->state.pass)
759          job->first_subpass = subpass_idx;
760 
761       cmd_buffer_serialize_job_if_needed(cmd_buffer, job);
762    }
763 }
764 
765 struct v3dv_job *
766 v3dv_cmd_buffer_start_job(struct v3dv_cmd_buffer *cmd_buffer,
767                           int32_t subpass_idx,
768                           enum v3dv_job_type type)
769 {
770    /* Don't create a new job if we can merge the current subpass into
771     * the current job.
772     */
773    if (cmd_buffer->state.pass &&
774        subpass_idx != -1 &&
775        cmd_buffer_can_merge_subpass(cmd_buffer, subpass_idx)) {
776       cmd_buffer->state.job->is_subpass_finish = false;
777       return cmd_buffer->state.job;
778    }
779 
780    /* Ensure we are not starting a new job without finishing a previous one */
781    if (cmd_buffer->state.job != NULL)
782       v3dv_cmd_buffer_finish_job(cmd_buffer);
783 
784    assert(cmd_buffer->state.job == NULL);
785    struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
786                                     sizeof(struct v3dv_job), 8,
787                                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
788 
789    if (!job) {
790       fprintf(stderr, "Error: failed to allocate CPU memory for job\n");
791       v3dv_flag_oom(cmd_buffer, NULL);
792       return NULL;
793    }
794 
795    v3dv_job_init(job, type, cmd_buffer->device, cmd_buffer, subpass_idx);
796    cmd_buffer->state.job = job;
797 
798    return job;
799 }
800 
801 static VkResult
802 cmd_buffer_reset(struct v3dv_cmd_buffer *cmd_buffer,
803                  VkCommandBufferResetFlags flags)
804 {
805    vk_command_buffer_reset(&cmd_buffer->vk);
806    if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_INITIALIZED) {
807       struct v3dv_device *device = cmd_buffer->device;
808 
809       /* FIXME: For now we always free all resources as if
810        * VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT was set.
811        */
812       if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_NEW)
813          cmd_buffer_free_resources(cmd_buffer);
814 
815       cmd_buffer_init(cmd_buffer, device);
816    }
817 
818    assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
819    return VK_SUCCESS;
820 }
821 
822 VKAPI_ATTR VkResult VKAPI_CALL
823 v3dv_AllocateCommandBuffers(VkDevice _device,
824                             const VkCommandBufferAllocateInfo *pAllocateInfo,
825                             VkCommandBuffer *pCommandBuffers)
826 {
827    V3DV_FROM_HANDLE(v3dv_device, device, _device);
828    VK_FROM_HANDLE(vk_command_pool, pool, pAllocateInfo->commandPool);
829 
830    VkResult result = VK_SUCCESS;
831    uint32_t i;
832 
833    for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
834       result = cmd_buffer_create(device, pool, pAllocateInfo->level,
835                                  &pCommandBuffers[i]);
836       if (result != VK_SUCCESS)
837          break;
838    }
839 
840    if (result != VK_SUCCESS) {
841       while (i--) {
842          VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
843          cmd_buffer_destroy(cmd_buffer);
844       }
845       for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
846          pCommandBuffers[i] = VK_NULL_HANDLE;
847    }
848 
849    return result;
850 }
851 
852 static void
853 cmd_buffer_subpass_handle_pending_resolves(struct v3dv_cmd_buffer *cmd_buffer)
854 {
855    assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
856    const struct v3dv_render_pass *pass = cmd_buffer->state.pass;
857    const struct v3dv_subpass *subpass =
858       &pass->subpasses[cmd_buffer->state.subpass_idx];
859 
860    if (!subpass->resolve_attachments)
861       return;
862 
863    /* At this point we have already ended the current subpass and now we are
864     * about to emit vkCmdResolveImage calls to get the resolves we can't
865     * handle in the subpass RCL.
866     *
867     * vkCmdResolveImage is not supposed to be called inside a render pass so
868     * before we call that we need to make sure our command buffer state reflects
869     * that we are no longer in a subpass by finishing the current job and
870     * resetting the framebuffer and render pass state temporarily and then
871     * restoring it after we are done with the resolves.
872     */
873    if (cmd_buffer->state.job)
874       v3dv_cmd_buffer_finish_job(cmd_buffer);
875    struct v3dv_framebuffer *restore_fb = cmd_buffer->state.framebuffer;
876    struct v3dv_render_pass *restore_pass = cmd_buffer->state.pass;
877    uint32_t restore_subpass_idx = cmd_buffer->state.subpass_idx;
878    cmd_buffer->state.framebuffer = NULL;
879    cmd_buffer->state.pass = NULL;
880    cmd_buffer->state.subpass_idx = -1;
881 
882    VkCommandBuffer cmd_buffer_handle = v3dv_cmd_buffer_to_handle(cmd_buffer);
883    for (uint32_t i = 0; i < subpass->color_count; i++) {
884       const uint32_t src_attachment_idx =
885          subpass->color_attachments[i].attachment;
886       if (src_attachment_idx == VK_ATTACHMENT_UNUSED)
887          continue;
888 
889       /* Skip if this attachment doesn't have a resolve or if it was already
890        * implemented as a TLB resolve.
891        */
892       if (!cmd_buffer->state.attachments[src_attachment_idx].has_resolve ||
893           cmd_buffer->state.attachments[src_attachment_idx].use_tlb_resolve) {
894          continue;
895       }
896 
897       const uint32_t dst_attachment_idx =
898          subpass->resolve_attachments[i].attachment;
899       assert(dst_attachment_idx != VK_ATTACHMENT_UNUSED);
900 
901       struct v3dv_image_view *src_iview =
902          cmd_buffer->state.attachments[src_attachment_idx].image_view;
903       struct v3dv_image_view *dst_iview =
904          cmd_buffer->state.attachments[dst_attachment_idx].image_view;
905 
906       VkImageResolve2KHR region = {
907          .sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR,
908          .srcSubresource = {
909             VK_IMAGE_ASPECT_COLOR_BIT,
910             src_iview->vk.base_mip_level,
911             src_iview->vk.base_array_layer,
912             src_iview->vk.layer_count,
913          },
914          .srcOffset = { 0, 0, 0 },
915          .dstSubresource =  {
916             VK_IMAGE_ASPECT_COLOR_BIT,
917             dst_iview->vk.base_mip_level,
918             dst_iview->vk.base_array_layer,
919             dst_iview->vk.layer_count,
920          },
921          .dstOffset = { 0, 0, 0 },
922          .extent = src_iview->vk.image->extent,
923       };
924 
925       struct v3dv_image *src_image = (struct v3dv_image *) src_iview->vk.image;
926       struct v3dv_image *dst_image = (struct v3dv_image *) dst_iview->vk.image;
927       VkResolveImageInfo2KHR resolve_info = {
928          .sType = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR,
929          .srcImage = v3dv_image_to_handle(src_image),
930          .srcImageLayout = VK_IMAGE_LAYOUT_GENERAL,
931          .dstImage = v3dv_image_to_handle(dst_image),
932          .dstImageLayout = VK_IMAGE_LAYOUT_GENERAL,
933          .regionCount = 1,
934          .pRegions = &region,
935       };
936       v3dv_CmdResolveImage2KHR(cmd_buffer_handle, &resolve_info);
937    }
938 
939    cmd_buffer->state.framebuffer = restore_fb;
940    cmd_buffer->state.pass = restore_pass;
941    cmd_buffer->state.subpass_idx = restore_subpass_idx;
942 }
943 
944 static VkResult
945 cmd_buffer_begin_render_pass_secondary(
946    struct v3dv_cmd_buffer *cmd_buffer,
947    const VkCommandBufferInheritanceInfo *inheritance_info)
948 {
949    assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
950    assert(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
951    assert(inheritance_info);
952 
953    cmd_buffer->state.pass =
954       v3dv_render_pass_from_handle(inheritance_info->renderPass);
955    assert(cmd_buffer->state.pass);
956 
957    cmd_buffer->state.framebuffer =
958       v3dv_framebuffer_from_handle(inheritance_info->framebuffer);
959 
960    assert(inheritance_info->subpass < cmd_buffer->state.pass->subpass_count);
961    cmd_buffer->state.subpass_idx = inheritance_info->subpass;
962 
963    cmd_buffer->state.inheritance.occlusion_query_enable =
964       inheritance_info->occlusionQueryEnable;
965 
966    /* Secondaries that execute inside a render pass won't start subpasses
967     * so we want to create a job for them here.
968     */
969    struct v3dv_job *job =
970       v3dv_cmd_buffer_start_job(cmd_buffer, inheritance_info->subpass,
971                                 V3DV_JOB_TYPE_GPU_CL_SECONDARY);
972    if (!job) {
973       v3dv_flag_oom(cmd_buffer, NULL);
974       return VK_ERROR_OUT_OF_HOST_MEMORY;
975    }
976 
977    /* Secondary command buffers don't know about the render area, but our
978     * scissor setup accounts for it, so let's make sure we make it large
979     * enough that it doesn't actually constrain any rendering. This should
980     * be fine, since the Vulkan spec states:
981     *
982     *    "The application must ensure (using scissor if necessary) that all
983     *     rendering is contained within the render area."
984     *
985     * FIXME: setup constants for the max framebuffer dimensions and use them
986     * here and when filling in VkPhysicalDeviceLimits.
987     */
988    const struct v3dv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
989    cmd_buffer->state.render_area.offset.x = 0;
990    cmd_buffer->state.render_area.offset.y = 0;
991    cmd_buffer->state.render_area.extent.width =
992       framebuffer ? framebuffer->width : 4096;
993    cmd_buffer->state.render_area.extent.height =
994       framebuffer ? framebuffer->height : 4096;
995 
996    return VK_SUCCESS;
997 }
998 
999 VKAPI_ATTR VkResult VKAPI_CALL
1000 v3dv_BeginCommandBuffer(VkCommandBuffer commandBuffer,
1001                         const VkCommandBufferBeginInfo *pBeginInfo)
1002 {
1003    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1004 
1005    /* If this is the first vkBeginCommandBuffer, we must initialize the
1006     * command buffer's state. Otherwise, we must reset its state. In both
1007     * cases we reset it.
1008     */
1009    VkResult result = cmd_buffer_reset(cmd_buffer, 0);
1010    if (result != VK_SUCCESS)
1011       return result;
1012 
1013    assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
1014 
1015    cmd_buffer->usage_flags = pBeginInfo->flags;
1016 
1017    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1018       if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1019          result =
1020             cmd_buffer_begin_render_pass_secondary(cmd_buffer,
1021                                                    pBeginInfo->pInheritanceInfo);
1022          if (result != VK_SUCCESS)
1023             return result;
1024       }
1025    }
1026 
1027    cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_RECORDING;
1028 
1029    return VK_SUCCESS;
1030 }
1031 
1032 VKAPI_ATTR VkResult VKAPI_CALL
1033 v3dv_ResetCommandBuffer(VkCommandBuffer commandBuffer,
1034                         VkCommandBufferResetFlags flags)
1035 {
1036    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1037    return cmd_buffer_reset(cmd_buffer, flags);
1038 }
1039 
1040 static void
1041 cmd_buffer_update_tile_alignment(struct v3dv_cmd_buffer *cmd_buffer)
1042 {
1043    /* Render areas and scissor/viewport are only relevant inside render passes,
1044     * otherwise we are dealing with transfer operations where these elements
1045     * don't apply.
1046     */
1047    assert(cmd_buffer->state.pass);
1048    const VkRect2D *rect = &cmd_buffer->state.render_area;
1049 
1050    /* We should only call this at the beginning of a subpass so we should
1051     * always have framebuffer information available.
1052     */
1053    assert(cmd_buffer->state.framebuffer);
1054    cmd_buffer->state.tile_aligned_render_area =
1055       v3dv_subpass_area_is_tile_aligned(cmd_buffer->device, rect,
1056                                         cmd_buffer->state.framebuffer,
1057                                         cmd_buffer->state.pass,
1058                                         cmd_buffer->state.subpass_idx);
1059 
1060    if (!cmd_buffer->state.tile_aligned_render_area) {
1061       perf_debug("Render area for subpass %d of render pass %p doesn't "
1062                  "match render pass granularity.\n",
1063                  cmd_buffer->state.subpass_idx, cmd_buffer->state.pass);
1064    }
1065 }
1066 
1067 static void
1068 cmd_buffer_update_attachment_resolve_state(struct v3dv_cmd_buffer *cmd_buffer)
1069 {
1070    /* NOTE: This should be called after cmd_buffer_update_tile_alignment()
1071     * since it relies on up-to-date information about subpass tile alignment.
1072     */
1073    const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1074    const struct v3dv_render_pass *pass = state->pass;
1075    const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
1076 
1077    for (uint32_t i = 0; i < subpass->color_count; i++) {
1078       const uint32_t attachment_idx = subpass->color_attachments[i].attachment;
1079       if (attachment_idx == VK_ATTACHMENT_UNUSED)
1080          continue;
1081 
1082       state->attachments[attachment_idx].has_resolve =
1083          subpass->resolve_attachments &&
1084          subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
1085 
1086       state->attachments[attachment_idx].use_tlb_resolve =
1087          state->attachments[attachment_idx].has_resolve &&
1088          state->tile_aligned_render_area &&
1089          pass->attachments[attachment_idx].try_tlb_resolve;
1090    }
1091 
1092    uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
1093    if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
1094       uint32_t ds_resolve_attachment_idx =
1095          subpass->ds_resolve_attachment.attachment;
1096       state->attachments[ds_attachment_idx].has_resolve =
1097          ds_resolve_attachment_idx != VK_ATTACHMENT_UNUSED;
1098 
1099       assert(!state->attachments[ds_attachment_idx].has_resolve ||
1100              (subpass->resolve_depth || subpass->resolve_stencil));
1101 
1102       state->attachments[ds_attachment_idx].use_tlb_resolve =
1103          state->attachments[ds_attachment_idx].has_resolve &&
1104          state->tile_aligned_render_area &&
1105          pass->attachments[ds_attachment_idx].try_tlb_resolve;
1106    }
1107 }
1108 
1109 static void
1110 cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
1111                                             uint32_t attachment_idx,
1112                                             const VkClearColorValue *color)
1113 {
1114    assert(attachment_idx < cmd_buffer->state.pass->attachment_count);
1115 
1116    const struct v3dv_render_pass_attachment *attachment =
1117       &cmd_buffer->state.pass->attachments[attachment_idx];
1118 
1119    uint32_t internal_type, internal_bpp;
1120    const struct v3dv_format *format =
1121       v3dv_X(cmd_buffer->device, get_format)(attachment->desc.format);
1122 
1123    v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_output_format)
1124       (format->rt_type, &internal_type, &internal_bpp);
1125 
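   /* internal_bpp is a hardware enum (0: 32 bpp, 1: 64 bpp, 2: 128 bpp), so
    * this computes the byte size of a pixel in the TLB's internal format:
    * 4, 8 or 16 bytes.
    */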
1126    uint32_t internal_size = 4 << internal_bpp;
1127 
1128    struct v3dv_cmd_buffer_attachment_state *attachment_state =
1129       &cmd_buffer->state.attachments[attachment_idx];
1130 
1131    v3dv_X(cmd_buffer->device, get_hw_clear_color)
1132       (color, internal_type, internal_size, &attachment_state->clear_value.color[0]);
1133 
1134    attachment_state->vk_clear_value.color = *color;
1135 }
1136 
1137 static void
1138 cmd_buffer_state_set_attachment_clear_depth_stencil(
1139    struct v3dv_cmd_buffer *cmd_buffer,
1140    uint32_t attachment_idx,
1141    bool clear_depth, bool clear_stencil,
1142    const VkClearDepthStencilValue *ds)
1143 {
1144    struct v3dv_cmd_buffer_attachment_state *attachment_state =
1145       &cmd_buffer->state.attachments[attachment_idx];
1146 
1147    if (clear_depth)
1148       attachment_state->clear_value.z = ds->depth;
1149 
1150    if (clear_stencil)
1151       attachment_state->clear_value.s = ds->stencil;
1152 
1153    attachment_state->vk_clear_value.depthStencil = *ds;
1154 }
1155 
1156 static void
1157 cmd_buffer_state_set_clear_values(struct v3dv_cmd_buffer *cmd_buffer,
1158                                   uint32_t count, const VkClearValue *values)
1159 {
1160    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1161    const struct v3dv_render_pass *pass = state->pass;
1162 
1163    /* There could be fewer clear values than attachments in the render pass, in
1164     * which case we only want to process as many as we have, or there could be
1165     * more, in which case we want to ignore those for which we don't have a
1166     * corresponding attachment.
1167     */
1168    count = MIN2(count, pass->attachment_count);
1169    for (uint32_t i = 0; i < count; i++) {
1170       const struct v3dv_render_pass_attachment *attachment =
1171          &pass->attachments[i];
1172 
1173       if (attachment->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
1174          continue;
1175 
1176       VkImageAspectFlags aspects = vk_format_aspects(attachment->desc.format);
1177       if (aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
1178          cmd_buffer_state_set_attachment_clear_color(cmd_buffer, i,
1179                                                      &values[i].color);
1180       } else if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
1181                             VK_IMAGE_ASPECT_STENCIL_BIT)) {
1182          cmd_buffer_state_set_attachment_clear_depth_stencil(
1183             cmd_buffer, i,
1184             aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
1185             aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
1186             &values[i].depthStencil);
1187       }
1188    }
1189 }
1190 
1191 static void
1192 cmd_buffer_state_set_attachments(struct v3dv_cmd_buffer *cmd_buffer,
1193                                  const VkRenderPassBeginInfo *pRenderPassBegin)
1194 {
1195    V3DV_FROM_HANDLE(v3dv_render_pass, pass, pRenderPassBegin->renderPass);
1196    V3DV_FROM_HANDLE(v3dv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1197 
1198    const VkRenderPassAttachmentBeginInfoKHR *attach_begin =
1199       vk_find_struct_const(pRenderPassBegin, RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
1200 
1201    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1202 
1203    for (uint32_t i = 0; i < pass->attachment_count; i++) {
1204       if (attach_begin && attach_begin->attachmentCount != 0) {
1205          state->attachments[i].image_view =
1206             v3dv_image_view_from_handle(attach_begin->pAttachments[i]);
1207       } else if (framebuffer) {
1208          state->attachments[i].image_view = framebuffer->attachments[i];
1209       } else {
1210          assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1211          state->attachments[i].image_view = NULL;
1212       }
1213    }
1214 }
1215 
1216 static void
1217 cmd_buffer_init_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer,
1218                                              const VkRenderPassBeginInfo *pRenderPassBegin)
1219 {
1220    cmd_buffer_state_set_clear_values(cmd_buffer,
1221                                      pRenderPassBegin->clearValueCount,
1222                                      pRenderPassBegin->pClearValues);
1223 
1224    cmd_buffer_state_set_attachments(cmd_buffer, pRenderPassBegin);
1225 }
1226 
1227 static void
1228 cmd_buffer_ensure_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer)
1229 {
1230    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1231    const struct v3dv_render_pass *pass = state->pass;
1232 
1233    if (state->attachment_alloc_count < pass->attachment_count) {
1234       if (state->attachments) {
1235          assert(state->attachment_alloc_count > 0);
1236          vk_free(&cmd_buffer->device->vk.alloc, state->attachments);
1237       }
1238 
1239       uint32_t size = sizeof(struct v3dv_cmd_buffer_attachment_state) *
1240                       pass->attachment_count;
1241       state->attachments = vk_zalloc(&cmd_buffer->device->vk.alloc, size, 8,
1242                                      VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1243       if (!state->attachments) {
1244          v3dv_flag_oom(cmd_buffer, NULL);
1245          return;
1246       }
1247       state->attachment_alloc_count = pass->attachment_count;
1248    }
1249 
1250    assert(state->attachment_alloc_count >= pass->attachment_count);
1251 }
1252 
1253 VKAPI_ATTR void VKAPI_CALL
1254 v3dv_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
1255                          const VkRenderPassBeginInfo *pRenderPassBegin,
1256                          const VkSubpassBeginInfo *pSubpassBeginInfo)
1257 {
1258    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1259    V3DV_FROM_HANDLE(v3dv_render_pass, pass, pRenderPassBegin->renderPass);
1260    V3DV_FROM_HANDLE(v3dv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1261 
1262    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1263    state->pass = pass;
1264    state->framebuffer = framebuffer;
1265 
1266    cmd_buffer_ensure_render_pass_attachment_state(cmd_buffer);
1267    v3dv_return_if_oom(cmd_buffer, NULL);
1268 
1269    cmd_buffer_init_render_pass_attachment_state(cmd_buffer, pRenderPassBegin);
1270 
1271    state->render_area = pRenderPassBegin->renderArea;
1272 
1273    /* If our render area is smaller than the current clip window we will have
1274     * to emit a new clip window to constrain it to the render area.
1275     */
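   /* (For example, if the current clip window still covers the whole
    * framebuffer and the render area is only 512x512, the comparison below
    * will flag the scissor state dirty so that a clip window clamped to the
    * render area gets re-emitted.)
    */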
1276    uint32_t min_render_x = state->render_area.offset.x;
1277    uint32_t min_render_y = state->render_area.offset.y;
1278    uint32_t max_render_x = min_render_x + state->render_area.extent.width - 1;
1279    uint32_t max_render_y = min_render_y + state->render_area.extent.height - 1;
1280    uint32_t min_clip_x = state->clip_window.offset.x;
1281    uint32_t min_clip_y = state->clip_window.offset.y;
1282    uint32_t max_clip_x = min_clip_x + state->clip_window.extent.width - 1;
1283    uint32_t max_clip_y = min_clip_y + state->clip_window.extent.height - 1;
1284    if (min_render_x > min_clip_x || min_render_y > min_clip_y ||
1285        max_render_x < max_clip_x || max_render_y < max_clip_y) {
1286       state->dirty |= V3DV_CMD_DIRTY_SCISSOR;
1287    }
1288 
1289    /* Setup for first subpass */
1290    v3dv_cmd_buffer_subpass_start(cmd_buffer, 0);
1291 }
1292 
1293 VKAPI_ATTR void VKAPI_CALL
1294 v3dv_CmdNextSubpass2(VkCommandBuffer commandBuffer,
1295                      const VkSubpassBeginInfo *pSubpassBeginInfo,
1296                      const VkSubpassEndInfo *pSubpassEndInfo)
1297 {
1298    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1299 
1300    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1301    assert(state->subpass_idx < state->pass->subpass_count - 1);
1302 
1303    /* Finish the previous subpass */
1304    v3dv_cmd_buffer_subpass_finish(cmd_buffer);
1305    cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
1306 
1307    /* Start the next subpass */
1308    v3dv_cmd_buffer_subpass_start(cmd_buffer, state->subpass_idx + 1);
1309 }
1310 
1311 static void
1312 cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
1313 {
1314    assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1315 
1316    assert(cmd_buffer->state.pass);
1317    assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
1318    const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1319    const struct v3dv_render_pass *pass = state->pass;
1320    const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
1321 
1322    /* We only need to emit subpass clears as draw calls when the render
1323     * area is not aligned to tile boundaries or for GFXH-1461.
1324     */
1325    if (cmd_buffer->state.tile_aligned_render_area &&
1326        !subpass->do_depth_clear_with_draw &&
1327        !subpass->do_stencil_clear_with_draw) {
1328       return;
1329    }
1330 
1331    uint32_t att_count = 0;
1332    VkClearAttachment atts[V3D_MAX_DRAW_BUFFERS + 1]; /* 4 color + D/S */
1333 
1334    /* We only need to emit subpass clears as draw calls for color attachments
1335     * if the render area is not aligned to tile boundaries.
1336     */
1337    if (!cmd_buffer->state.tile_aligned_render_area) {
1338       for (uint32_t i = 0; i < subpass->color_count; i++) {
1339          const uint32_t att_idx = subpass->color_attachments[i].attachment;
1340          if (att_idx == VK_ATTACHMENT_UNUSED)
1341             continue;
1342 
1343          struct v3dv_render_pass_attachment *att = &pass->attachments[att_idx];
1344          if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
1345             continue;
1346 
1347          if (state->subpass_idx != att->first_subpass)
1348             continue;
1349 
1350          atts[att_count].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1351          atts[att_count].colorAttachment = i;
1352          atts[att_count].clearValue = state->attachments[att_idx].vk_clear_value;
1353          att_count++;
1354       }
1355    }
1356 
1357    /* For D/S we may also need to emit a subpass clear for GFXH-1461 */
1358    const uint32_t ds_att_idx = subpass->ds_attachment.attachment;
1359    if (ds_att_idx != VK_ATTACHMENT_UNUSED) {
1360       struct v3dv_render_pass_attachment *att = &pass->attachments[ds_att_idx];
1361       if (state->subpass_idx == att->first_subpass) {
1362          VkImageAspectFlags aspects = vk_format_aspects(att->desc.format);
1363          if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
1364              (cmd_buffer->state.tile_aligned_render_area &&
1365               !subpass->do_depth_clear_with_draw)) {
1366             aspects &= ~VK_IMAGE_ASPECT_DEPTH_BIT;
1367          }
1368          if (att->desc.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
1369              (cmd_buffer->state.tile_aligned_render_area &&
1370               !subpass->do_stencil_clear_with_draw)) {
1371             aspects &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
1372          }
1373          if (aspects) {
1374             atts[att_count].aspectMask = aspects;
1375             atts[att_count].colorAttachment = 0; /* Ignored */
1376             atts[att_count].clearValue =
1377                state->attachments[ds_att_idx].vk_clear_value;
1378             att_count++;
1379          }
1380       }
1381    }
1382 
1383    if (att_count == 0)
1384       return;
1385 
1386    if (!cmd_buffer->state.tile_aligned_render_area) {
1387       perf_debug("Render area doesn't match render pass granularity, falling "
1388                  "back to vkCmdClearAttachments for "
1389                  "VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
1390    } else if (subpass->do_depth_clear_with_draw ||
1391               subpass->do_stencil_clear_with_draw) {
1392       perf_debug("Subpass clears DEPTH but loads STENCIL (or viceversa), "
1393                  "falling back to vkCmdClearAttachments for "
1394                  "VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
1395    }
1396 
1397    /* From the Vulkan 1.0 spec:
1398     *
1399     *    "VK_ATTACHMENT_LOAD_OP_CLEAR specifies that the contents within the
1400     *     render area will be cleared to a uniform value, which is specified
1401     *     when a render pass instance is begun."
1402     *
1403     * So the clear is only constrained by the render area and not by pipeline
1404     * state such as scissor or viewport; these are also the semantics of
1405     * vkCmdClearAttachments.
1406     */
1407    VkCommandBuffer _cmd_buffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
1408    VkClearRect rect = {
1409       .rect = state->render_area,
1410       .baseArrayLayer = 0,
1411       .layerCount = 1,
1412    };
1413    v3dv_CmdClearAttachments(_cmd_buffer, att_count, atts, 1, &rect);
1414 }
1415 
1416 static struct v3dv_job *
1417 cmd_buffer_subpass_create_job(struct v3dv_cmd_buffer *cmd_buffer,
1418                               uint32_t subpass_idx,
1419                               enum v3dv_job_type type)
1420 {
1421    assert(type == V3DV_JOB_TYPE_GPU_CL ||
1422           type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1423 
1424    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1425    assert(subpass_idx < state->pass->subpass_count);
1426 
1427    /* Starting a new job can trigger a finish of the current one, so don't
1428     * change the command buffer state for the new job until we are done creating
1429     * the new job.
1430     */
1431    struct v3dv_job *job =
1432       v3dv_cmd_buffer_start_job(cmd_buffer, subpass_idx, type);
1433    if (!job)
1434       return NULL;
1435 
1436    state->subpass_idx = subpass_idx;
1437 
1438    /* If we are starting a new job we need to setup binning. We only do this
1439     * for V3DV_JOB_TYPE_GPU_CL jobs because V3DV_JOB_TYPE_GPU_CL_SECONDARY
1440     * jobs are not submitted to the GPU directly, and are instead meant to be
1441     * branched to from other V3DV_JOB_TYPE_GPU_CL jobs.
1442     */
1443    if (type == V3DV_JOB_TYPE_GPU_CL &&
1444        job->first_subpass == state->subpass_idx) {
1445       const struct v3dv_subpass *subpass =
1446          &state->pass->subpasses[state->subpass_idx];
1447 
1448       const struct v3dv_framebuffer *framebuffer = state->framebuffer;
1449 
1450       uint8_t internal_bpp;
1451       bool msaa;
1452       v3dv_X(job->device, framebuffer_compute_internal_bpp_msaa)
1453          (framebuffer, state->attachments, subpass, &internal_bpp, &msaa);
1454 
1455       /* From the Vulkan spec:
1456        *
1457        *    "If the render pass uses multiview, then layers must be one and
1458        *     each attachment requires a number of layers that is greater than
1459        *     the maximum bit index set in the view mask in the subpasses in
1460        *     which it is used."
1461        *
1462        * So when multiview is enabled, we take the number of layers from the
1463        * last bit set in the view mask.
1464        */
1465       uint32_t layers = framebuffer->layers;
1466       if (subpass->view_mask != 0) {
1467          assert(framebuffer->layers == 1);
1468          layers = util_last_bit(subpass->view_mask);
1469       }
1470 
1471       v3dv_job_start_frame(job,
1472                            framebuffer->width,
1473                            framebuffer->height,
1474                            layers,
1475                            true,
1476                            subpass->color_count,
1477                            internal_bpp,
1478                            msaa);
1479    }
1480 
1481    return job;
1482 }
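
/* Worked example for the multiview layer count above: util_last_bit()
 * returns the 1-based index of the most significant set bit, so a subpass
 * with view_mask = 0x6 (views 1 and 2) uses
 *
 *    layers = util_last_bit(0x6);   // == 3
 *
 * which matches the spec requirement that attachments provide more layers
 * than the highest view index used (here, index 2).
 */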
1483 
1484 struct v3dv_job *
1485 v3dv_cmd_buffer_subpass_start(struct v3dv_cmd_buffer *cmd_buffer,
1486                               uint32_t subpass_idx)
1487 {
1488    assert(cmd_buffer->state.pass);
1489    assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
1490 
1491    struct v3dv_job *job =
1492       cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1493                                     V3DV_JOB_TYPE_GPU_CL);
1494    if (!job)
1495       return NULL;
1496 
1497    /* Check if our render area is aligned to tile boundaries. We have to do
1498     * this in each subpass because the subset of attachments used can change
1499     * and with that the tile size selected by the hardware can change too.
1500     */
1501    cmd_buffer_update_tile_alignment(cmd_buffer);
1502 
1503    cmd_buffer_update_attachment_resolve_state(cmd_buffer);
1504 
1505    /* If we can't use TLB clears then we need to emit draw clears for any
1506     * LOAD_OP_CLEAR attachments in this subpass now. We might also need to emit
1507     * Depth/Stencil clears if we hit GFXH-1461.
1508     *
1509     * Secondary command buffers don't start subpasses (and may not even have
1510     * framebuffer state), so we only care about this in primaries. The only
1511     * exception could be a secondary running inside a subpass that needs to
1512     * record a meta operation (with its own render pass) that relies on
1513     * attachment load clears, but we don't have any instances of that right
1514     * now.
1515     */
1516    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
1517       cmd_buffer_emit_subpass_clears(cmd_buffer);
1518 
1519    return job;
1520 }
1521 
1522 struct v3dv_job *
1523 v3dv_cmd_buffer_subpass_resume(struct v3dv_cmd_buffer *cmd_buffer,
1524                                uint32_t subpass_idx)
1525 {
1526    assert(cmd_buffer->state.pass);
1527    assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
1528 
1529    struct v3dv_job *job;
1530    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1531       job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1532                                           V3DV_JOB_TYPE_GPU_CL);
1533    } else {
1534       assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1535       job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
1536                                           V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1537    }
1538 
1539    if (!job)
1540       return NULL;
1541 
1542    job->is_subpass_continue = true;
1543 
1544    return job;
1545 }
1546 
1547 void
1548 v3dv_cmd_buffer_subpass_finish(struct v3dv_cmd_buffer *cmd_buffer)
1549 {
1550    /* We can end up here without a job if the last command recorded into the
1551     * subpass already finished the job (for example a pipeline barrier). In
1552     * that case we simply won't set the is_subpass_finish flag, but that is
1553     * not required for correct behavior.
1554     */
1555    struct v3dv_job *job = cmd_buffer->state.job;
1556    if (job)
1557       job->is_subpass_finish = true;
1558 }
1559 
1560 VKAPI_ATTR void VKAPI_CALL
1561 v3dv_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
1562                        const VkSubpassEndInfo *pSubpassEndInfo)
1563 {
1564    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1565 
1566    /* Finalize last subpass */
1567    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1568    assert(state->subpass_idx == state->pass->subpass_count - 1);
1569    v3dv_cmd_buffer_subpass_finish(cmd_buffer);
1570    v3dv_cmd_buffer_finish_job(cmd_buffer);
1571 
1572    cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
1573 
1574    /* We are no longer inside a render pass */
1575    state->framebuffer = NULL;
1576    state->pass = NULL;
1577    state->subpass_idx = -1;
1578 }
1579 
1580 VKAPI_ATTR VkResult VKAPI_CALL
1581 v3dv_EndCommandBuffer(VkCommandBuffer commandBuffer)
1582 {
1583    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1584 
1585    if (cmd_buffer->state.oom)
1586       return VK_ERROR_OUT_OF_HOST_MEMORY;
1587 
1588    /* Primaries should have ended any recording jobs by the time they hit
1589     * vkCmdEndRenderPass (if we are inside a render pass). Commands outside
1590     * a render pass instance (for both primaries and secondaries) spawn
1591     * complete jobs too. So the only case where we can get here without
1592     * finishing a recording job is when we are recording a secondary
1593     * inside a render pass.
1594     */
1595    if (cmd_buffer->state.job) {
1596       assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
1597              cmd_buffer->state.pass);
1598       v3dv_cmd_buffer_finish_job(cmd_buffer);
1599    }
1600 
1601    cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_EXECUTABLE;
1602 
1603    return VK_SUCCESS;
1604 }
1605 
1606 static void
1607 clone_bo_list(struct v3dv_cmd_buffer *cmd_buffer,
1608               struct list_head *dst,
1609               struct list_head *src)
1610 {
1611    assert(cmd_buffer);
1612 
1613    list_inithead(dst);
1614    list_for_each_entry(struct v3dv_bo, bo, src, list_link) {
1615       struct v3dv_bo *clone_bo =
1616          vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(struct v3dv_bo), 8,
1617                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1618       if (!clone_bo) {
1619          v3dv_flag_oom(cmd_buffer, NULL);
1620          return;
1621       }
1622 
1623       *clone_bo = *bo;
1624       list_addtail(&clone_bo->list_link, dst);
1625    }
1626 }
1627 
1628 /* Clones a job for inclusion in the given command buffer. Note that this
1629  * doesn't make a deep copy, so the cloned job doesn't own any resources.
1630  * Useful when we need to have a job in more than one list, which happens
1631  * for jobs recorded in secondary command buffers when we want to execute
1632  * them in primaries.
1633  */
1634 struct v3dv_job *
1635 v3dv_job_clone_in_cmd_buffer(struct v3dv_job *job,
1636                              struct v3dv_cmd_buffer *cmd_buffer)
1637 {
1638    struct v3dv_job *clone_job = vk_alloc(&job->device->vk.alloc,
1639                                          sizeof(struct v3dv_job), 8,
1640                                          VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1641    if (!clone_job) {
1642       v3dv_flag_oom(cmd_buffer, NULL);
1643       return NULL;
1644    }
1645 
1646    /* Cloned jobs don't duplicate resources! */
1647    *clone_job = *job;
1648    clone_job->is_clone = true;
1649    clone_job->cmd_buffer = cmd_buffer;
1650    list_addtail(&clone_job->list_link, &cmd_buffer->jobs);
1651 
1652    /* We need to regen the BO lists so that they point to the BO list in the
1653     * cloned job. Otherwise functions like list_length() will loop forever.
1654     */
1655    if (job->type == V3DV_JOB_TYPE_GPU_CL) {
1656       clone_bo_list(cmd_buffer, &clone_job->bcl.bo_list, &job->bcl.bo_list);
1657       clone_bo_list(cmd_buffer, &clone_job->rcl.bo_list, &job->rcl.bo_list);
1658       clone_bo_list(cmd_buffer, &clone_job->indirect.bo_list,
1659                     &job->indirect.bo_list);
1660    }
1661 
1662    return clone_job;
1663 }
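
/* A minimal sketch of why the BO lists above must be rebuilt rather than
 * shared: the list heads are embedded in the job by value, so a plain
 * struct copy leaves the clone's head pointing at the original's nodes,
 * whose links still terminate at the *original* head. Walking the clone's
 * list then never sees its own sentinel and loops forever. Hypothetical
 * illustration with local variables:
 *
 *    struct list_head orig, copy;
 *    struct v3dv_bo bo;                  // embeds a 'list_link' node
 *    list_inithead(&orig);
 *    list_addtail(&bo.list_link, &orig); // orig <-> bo <-> orig
 *    copy = orig;                        // copy.next == &bo.list_link, but
 *                                        // bo.list_link.next == &orig, so
 *                                        // iterating 'copy' never reaches
 *                                        // &copy and never terminates
 */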
1664 
1665 static void
1666 cmd_buffer_execute_outside_pass(struct v3dv_cmd_buffer *primary,
1667                                 uint32_t cmd_buffer_count,
1668                                 const VkCommandBuffer *cmd_buffers)
1669 {
1670    bool pending_barrier = false;
1671    bool pending_bcl_barrier = false;
1672    for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1673       V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
1674 
1675       assert(!(secondary->usage_flags &
1676                VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
1677 
1678       /* Secondary command buffers that execute outside a render pass create
1679        * complete jobs with an RCL and tile setup, so we simply want to merge
1680        * their job list into the primary's. However, because they may be
1681        * executed into multiple primaries at the same time and we only have a
1682        * single list_link in each job, we can't just add them to the primary's
1683        * job list and we instead have to clone them first.
1684        *
1685        * Alternatively, we could create an "execute secondary" CPU job that
1686        * when executed in a queue, would submit all the jobs in the referenced
1687        * secondary command buffer. However, this would raise some challenges
1688        * to make it work with the implementation of wait threads in the queue
1689        * which we use for event waits, for example.
1690        */
1691       list_for_each_entry(struct v3dv_job, secondary_job,
1692                           &secondary->jobs, list_link) {
1693          /* These can only happen inside a render pass */
1694          assert(secondary_job->type != V3DV_JOB_TYPE_GPU_CL_SECONDARY);
1695          struct v3dv_job *job = v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
1696          if (!job)
1697             return;
1698 
1699          if (pending_barrier) {
1700             job->serialize = true;
1701             if (pending_bcl_barrier)
1702                job->needs_bcl_sync = true;
1703             pending_barrier = false;
1704             pending_bcl_barrier = false;
1705          }
1706       }
1707 
1708       /* If this secondary had any pending barrier state we will need that
1709        * barrier state consumed with whatever comes after it (first job in
1710        * the next secondary or the primary, if this was the last secondary).
1711        */
1712       assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
1713       pending_barrier = secondary->state.has_barrier;
1714       pending_bcl_barrier = secondary->state.has_bcl_barrier;
1715    }
1716 
1717    if (pending_barrier) {
1718       primary->state.has_barrier = true;
1719       primary->state.has_bcl_barrier |= pending_bcl_barrier;
1720    }
1721 }
1722 
1723 VKAPI_ATTR void VKAPI_CALL
1724 v3dv_CmdExecuteCommands(VkCommandBuffer commandBuffer,
1725                         uint32_t commandBufferCount,
1726                         const VkCommandBuffer *pCommandBuffers)
1727 {
1728    V3DV_FROM_HANDLE(v3dv_cmd_buffer, primary, commandBuffer);
1729 
1730    if (primary->state.pass != NULL) {
1731       v3dv_X(primary->device, cmd_buffer_execute_inside_pass)
1732          (primary, commandBufferCount, pCommandBuffers);
1733    } else {
1734       cmd_buffer_execute_outside_pass(primary,
1735                                       commandBufferCount, pCommandBuffers);
1736    }
1737 }
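
/* Usage sketch (assumed client code, with placeholder handles): which path
 * is taken above depends only on whether the primary is inside a render
 * pass instance when vkCmdExecuteCommands is recorded:
 *
 *    vkBeginCommandBuffer(primary, &begin_info);
 *
 *    // Outside a render pass: the secondary's complete jobs are cloned
 *    // into the primary's job list (cmd_buffer_execute_outside_pass).
 *    vkCmdExecuteCommands(primary, 1, &secondary);
 *
 *    // Inside a render pass: the secondary must have been recorded with
 *    // VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and is handled by
 *    // the per-hardware cmd_buffer_execute_inside_pass implementation.
 *    vkCmdBeginRenderPass2(primary, &rp_begin, &sp_begin);
 *    vkCmdExecuteCommands(primary, 1, &secondary_in_pass);
 *    vkCmdEndRenderPass2(primary, &sp_end);
 *
 *    vkEndCommandBuffer(primary);
 */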
1738 
1739 /* This goes through the list of possible dynamic states in the pipeline and,
1740  * for those that are not configured as dynamic, copies relevant state into
1741  * the command buffer.
1742  */
1743 static void
1744 cmd_buffer_bind_pipeline_static_state(struct v3dv_cmd_buffer *cmd_buffer,
1745                                       const struct v3dv_dynamic_state *src)
1746 {
1747    struct v3dv_dynamic_state *dest = &cmd_buffer->state.dynamic;
1748    uint32_t dynamic_mask = src->mask;
1749    uint32_t dirty = 0;
1750 
1751    if (!(dynamic_mask & V3DV_DYNAMIC_VIEWPORT)) {
1752       dest->viewport.count = src->viewport.count;
1753       if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
1754                  src->viewport.count * sizeof(VkViewport))) {
1755          typed_memcpy(dest->viewport.viewports,
1756                       src->viewport.viewports,
1757                       src->viewport.count);
1758          typed_memcpy(dest->viewport.scale, src->viewport.scale,
1759                       src->viewport.count);
1760          typed_memcpy(dest->viewport.translate, src->viewport.translate,
1761                       src->viewport.count);
1762          dirty |= V3DV_CMD_DIRTY_VIEWPORT;
1763       }
1764    }
1765 
1766    if (!(dynamic_mask & V3DV_DYNAMIC_SCISSOR)) {
1767       dest->scissor.count = src->scissor.count;
1768       if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
1769                  src->scissor.count * sizeof(VkRect2D))) {
1770          typed_memcpy(dest->scissor.scissors,
1771                       src->scissor.scissors, src->scissor.count);
1772          dirty |= V3DV_CMD_DIRTY_SCISSOR;
1773       }
1774    }
1775 
1776    if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK)) {
1777       if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
1778                  sizeof(src->stencil_compare_mask))) {
1779          dest->stencil_compare_mask = src->stencil_compare_mask;
1780          dirty |= V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK;
1781       }
1782    }
1783 
1784    if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK)) {
1785       if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
1786                  sizeof(src->stencil_write_mask))) {
1787          dest->stencil_write_mask = src->stencil_write_mask;
1788          dirty |= V3DV_CMD_DIRTY_STENCIL_WRITE_MASK;
1789       }
1790    }
1791 
1792    if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_REFERENCE)) {
1793       if (memcmp(&dest->stencil_reference, &src->stencil_reference,
1794                  sizeof(src->stencil_reference))) {
1795          dest->stencil_reference = src->stencil_reference;
1796          dirty |= V3DV_CMD_DIRTY_STENCIL_REFERENCE;
1797       }
1798    }
1799 
1800    if (!(dynamic_mask & V3DV_DYNAMIC_BLEND_CONSTANTS)) {
1801       if (memcmp(dest->blend_constants, src->blend_constants,
1802                  sizeof(src->blend_constants))) {
1803          memcpy(dest->blend_constants, src->blend_constants,
1804                 sizeof(src->blend_constants));
1805          dirty |= V3DV_CMD_DIRTY_BLEND_CONSTANTS;
1806       }
1807    }
1808 
1809    if (!(dynamic_mask & V3DV_DYNAMIC_DEPTH_BIAS)) {
1810       if (memcmp(&dest->depth_bias, &src->depth_bias,
1811                  sizeof(src->depth_bias))) {
1812          memcpy(&dest->depth_bias, &src->depth_bias, sizeof(src->depth_bias));
1813          dirty |= V3DV_CMD_DIRTY_DEPTH_BIAS;
1814       }
1815    }
1816 
1817    if (!(dynamic_mask & V3DV_DYNAMIC_LINE_WIDTH)) {
1818       if (dest->line_width != src->line_width) {
1819          dest->line_width = src->line_width;
1820          dirty |= V3DV_CMD_DIRTY_LINE_WIDTH;
1821       }
1822    }
1823 
1824    if (!(dynamic_mask & V3DV_DYNAMIC_COLOR_WRITE_ENABLE)) {
1825       if (dest->color_write_enable != src->color_write_enable) {
1826          dest->color_write_enable = src->color_write_enable;
1827          dirty |= V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE;
1828       }
1829    }
1830 
1831    cmd_buffer->state.dynamic.mask = dynamic_mask;
1832    cmd_buffer->state.dirty |= dirty;
1833 }
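
/* A small sketch of the rule above from the API side (assumed client code
 * and placeholder pipelines): state the pipeline declares as dynamic
 * survives the bind, while non-dynamic state is overwritten by the
 * pipeline's static values and only marked dirty when it actually changes:
 *
 *    vkCmdSetLineWidth(cmd, 2.0f);
 *    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_static);
 *    // line width is now the pipeline's static value (e.g. 1.0f)
 *
 *    vkCmdSetLineWidth(cmd, 2.0f);
 *    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_dynamic_lw);
 *    // pipeline lists VK_DYNAMIC_STATE_LINE_WIDTH: the 2.0f above is kept
 */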
1834 
1835 static void
1836 bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
1837                        struct v3dv_pipeline *pipeline)
1838 {
1839    assert(pipeline && !(pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
1840    if (cmd_buffer->state.gfx.pipeline == pipeline)
1841       return;
1842 
1843    cmd_buffer->state.gfx.pipeline = pipeline;
1844 
1845    cmd_buffer_bind_pipeline_static_state(cmd_buffer, &pipeline->dynamic_state);
1846 
1847    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PIPELINE;
1848 }
1849 
1850 static void
1851 bind_compute_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
1852                       struct v3dv_pipeline *pipeline)
1853 {
1854    assert(pipeline && pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
1855 
1856    if (cmd_buffer->state.compute.pipeline == pipeline)
1857       return;
1858 
1859    cmd_buffer->state.compute.pipeline = pipeline;
1860    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_COMPUTE_PIPELINE;
1861 }
1862 
1863 VKAPI_ATTR void VKAPI_CALL
1864 v3dv_CmdBindPipeline(VkCommandBuffer commandBuffer,
1865                      VkPipelineBindPoint pipelineBindPoint,
1866                      VkPipeline _pipeline)
1867 {
1868    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1869    V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, _pipeline);
1870 
1871    switch (pipelineBindPoint) {
1872    case VK_PIPELINE_BIND_POINT_COMPUTE:
1873       bind_compute_pipeline(cmd_buffer, pipeline);
1874       break;
1875 
1876    case VK_PIPELINE_BIND_POINT_GRAPHICS:
1877       bind_graphics_pipeline(cmd_buffer, pipeline);
1878       break;
1879 
1880    default:
1881       assert(!"invalid bind point");
1882       break;
1883    }
1884 }
1885 
1886 /* FIXME: C&P from radv. tu has similar code. Perhaps common place? */
1887 void
1888 v3dv_viewport_compute_xform(const VkViewport *viewport,
1889                             float scale[3],
1890                             float translate[3])
1891 {
1892    float x = viewport->x;
1893    float y = viewport->y;
1894    float half_width = 0.5f * viewport->width;
1895    float half_height = 0.5f * viewport->height;
1896    double n = viewport->minDepth;
1897    double f = viewport->maxDepth;
1898 
1899    scale[0] = half_width;
1900    translate[0] = half_width + x;
1901    scale[1] = half_height;
1902    translate[1] = half_height + y;
1903 
1904    scale[2] = (f - n);
1905    translate[2] = n;
1906 
1907    /* It seems that if the scale is small enough the hardware won't clip
1908     * correctly so we work around this by choosing the smallest scale that
1909     * seems to work.
1910     *
1911     * This case is exercised by CTS:
1912     * dEQP-VK.draw.inverted_depth_ranges.nodepthclamp_deltazero
1913     */
1914    const float min_abs_scale = 0.000009f;
1915    if (fabs(scale[2]) < min_abs_scale)
1916       scale[2] = min_abs_scale * (scale[2] < 0 ? -1.0f : 1.0f);
1917 }
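
/* Worked example of the transform above, with illustrative values: a
 * full-framebuffer viewport {x = 0, y = 0, width = 1920, height = 1080,
 * minDepth = 0.0, maxDepth = 1.0} produces
 *
 *    scale     = { 960.0, 540.0, 1.0 }
 *    translate = { 960.0, 540.0, 0.0 }
 *
 * so NDC x,y in [-1, 1] map to [0, 1920] x [0, 1080] and NDC z in [0, 1]
 * maps to the [minDepth, maxDepth] range.
 */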
1918 
1919 VKAPI_ATTR void VKAPI_CALL
1920 v3dv_CmdSetViewport(VkCommandBuffer commandBuffer,
1921                     uint32_t firstViewport,
1922                     uint32_t viewportCount,
1923                     const VkViewport *pViewports)
1924 {
1925    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1926    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1927    const uint32_t total_count = firstViewport + viewportCount;
1928 
1929    assert(firstViewport < MAX_VIEWPORTS);
1930    assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
1931 
1932    if (state->dynamic.viewport.count < total_count)
1933       state->dynamic.viewport.count = total_count;
1934 
1935    if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
1936                pViewports, viewportCount * sizeof(*pViewports))) {
1937       return;
1938    }
1939 
1940    memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
1941           viewportCount * sizeof(*pViewports));
1942 
1943    for (uint32_t i = firstViewport; i < total_count; i++) {
1944       v3dv_viewport_compute_xform(&state->dynamic.viewport.viewports[i],
1945                                   state->dynamic.viewport.scale[i],
1946                                   state->dynamic.viewport.translate[i]);
1947    }
1948 
1949    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VIEWPORT;
1950 }
1951 
1952 VKAPI_ATTR void VKAPI_CALL
1953 v3dv_CmdSetScissor(VkCommandBuffer commandBuffer,
1954                    uint32_t firstScissor,
1955                    uint32_t scissorCount,
1956                    const VkRect2D *pScissors)
1957 {
1958    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
1959    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1960 
1961    assert(firstScissor < MAX_SCISSORS);
1962    assert(firstScissor + scissorCount >= 1 &&
1963           firstScissor + scissorCount <= MAX_SCISSORS);
1964 
1965    if (state->dynamic.scissor.count < firstScissor + scissorCount)
1966       state->dynamic.scissor.count = firstScissor + scissorCount;
1967 
1968    if (!memcmp(state->dynamic.scissor.scissors + firstScissor,
1969                pScissors, scissorCount * sizeof(*pScissors))) {
1970       return;
1971    }
1972 
1973    memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
1974           scissorCount * sizeof(*pScissors));
1975 
1976    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_SCISSOR;
1977 }
1978 
1979 static void
1980 emit_scissor(struct v3dv_cmd_buffer *cmd_buffer)
1981 {
1982    if (cmd_buffer->state.dynamic.viewport.count == 0)
1983       return;
1984 
1985    struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1986 
1987    /* FIXME: right now we only support one viewport. viewports[0] would work
1988     * now, but would need to change if we allow multiple viewports.
1989     */
1990    float *vptranslate = dynamic->viewport.translate[0];
1991    float *vpscale = dynamic->viewport.scale[0];
1992 
1993    float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
1994    float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
1995    float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
1996    float vp_maxy = fabsf(vpscale[1]) + vptranslate[1];
1997 
1998    /* Quoting from v3dx_emit:
1999     * "Clip to the scissor if it's enabled, but still clip to the
2000     * drawable regardless since that controls where the binner
2001     * tries to put things.
2002     *
2003     * Additionally, always clip the rendering to the viewport,
2004     * since the hardware does guardband clipping, meaning
2005     * primitives would rasterize outside of the view volume."
2006     */
2007    uint32_t minx, miny, maxx, maxy;
2008 
2009    /* From the Vulkan spec:
2010     *
2011     * "The application must ensure (using scissor if necessary) that all
2012     *  rendering is contained within the render area. The render area must be
2013     *  contained within the framebuffer dimensions."
2014     *
2015     * So it is the application's responsibility to ensure this. Still, we can
2016     * help by automatically restricting the scissor rect to the render area.
2017     */
2018    minx = MAX2(vp_minx, cmd_buffer->state.render_area.offset.x);
2019    miny = MAX2(vp_miny, cmd_buffer->state.render_area.offset.y);
2020    maxx = MIN2(vp_maxx, cmd_buffer->state.render_area.offset.x +
2021                         cmd_buffer->state.render_area.extent.width);
2022    maxy = MIN2(vp_maxy, cmd_buffer->state.render_area.offset.y +
2023                         cmd_buffer->state.render_area.extent.height);
2029 
2030    /* Clip against user provided scissor if needed.
2031     *
2032     * FIXME: right now we only allow one scissor. Below would need to be
2033     * updated if we support more
2034     */
2035    if (dynamic->scissor.count > 0) {
2036       VkRect2D *scissor = &dynamic->scissor.scissors[0];
2037       minx = MAX2(minx, scissor->offset.x);
2038       miny = MAX2(miny, scissor->offset.y);
2039       maxx = MIN2(maxx, scissor->offset.x + scissor->extent.width);
2040       maxy = MIN2(maxy, scissor->offset.y + scissor->extent.height);
2041    }
2042 
2043    /* If the scissor is outside the viewport area we end up with
2044     * min{x,y} > max{x,y}.
2045     */
2046    if (minx > maxx)
2047       maxx = minx;
2048    if (miny > maxy)
2049       maxy = miny;
2050 
2051    cmd_buffer->state.clip_window.offset.x = minx;
2052    cmd_buffer->state.clip_window.offset.y = miny;
2053    cmd_buffer->state.clip_window.extent.width = maxx - minx;
2054    cmd_buffer->state.clip_window.extent.height = maxy - miny;
2055 
2056    v3dv_X(cmd_buffer->device, job_emit_clip_window)
2057       (cmd_buffer->state.job, &cmd_buffer->state.clip_window);
2058 
2059    cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_SCISSOR;
2060 }
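
/* Worked example of the clamping above, with illustrative numbers: assume a
 * render area and a single viewport that both cover an 800x600 framebuffer,
 * and a user scissor of {offset = {100, 100}, extent = {200, 200}}. The
 * viewport bounds are [0, 800] x [0, 600], so after clipping against the
 * scissor:
 *
 *    minx = MAX2(0, 100)   = 100      miny = MAX2(0, 100)   = 100
 *    maxx = MIN2(800, 300) = 300      maxy = MIN2(600, 300) = 300
 *
 * which yields a clip window at offset (100, 100) with extent 200x200. A
 * scissor placed entirely outside the viewport collapses the window to a
 * zero-sized extent instead of producing negative dimensions.
 */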
2061 
2062 static void
2063 update_gfx_uniform_state(struct v3dv_cmd_buffer *cmd_buffer,
2064                          uint32_t dirty_uniform_state)
2065 {
2066    /* We need to update uniform streams if any piece of state that is passed
2067     * to the shader as a uniform may have changed.
2068     *
2069     * If only descriptor sets are dirty then we can safely ignore updates
2070     * for shader stages that don't access descriptors.
2071     */
2072 
2073    struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2074    assert(pipeline);
2075 
2076    const bool has_new_pipeline = dirty_uniform_state & V3DV_CMD_DIRTY_PIPELINE;
2077    const bool has_new_viewport = dirty_uniform_state & V3DV_CMD_DIRTY_VIEWPORT;
2078    const bool has_new_push_constants = dirty_uniform_state & V3DV_CMD_DIRTY_PUSH_CONSTANTS;
2079    const bool has_new_descriptors = dirty_uniform_state & V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
2080    const bool has_new_view_index = dirty_uniform_state & V3DV_CMD_DIRTY_VIEW_INDEX;
2081 
2082    /* VK_SHADER_STAGE_FRAGMENT_BIT */
2083    const bool has_new_descriptors_fs =
2084       has_new_descriptors &&
2085       (cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
2086 
2087    const bool has_new_push_constants_fs =
2088       has_new_push_constants &&
2089       (cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
2090 
2091    const bool needs_fs_update = has_new_pipeline ||
2092                                 has_new_view_index ||
2093                                 has_new_push_constants_fs ||
2094                                 has_new_descriptors_fs;
2096 
2097    if (needs_fs_update) {
2098       struct v3dv_shader_variant *fs_variant =
2099          pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
2100 
2101       cmd_buffer->state.uniforms.fs =
2102          v3dv_write_uniforms(cmd_buffer, pipeline, fs_variant);
2103    }
2104 
2105    /* VK_SHADER_STAGE_GEOMETRY_BIT */
2106    if (pipeline->has_gs) {
2107       const bool has_new_descriptors_gs =
2108          has_new_descriptors &&
2109          (cmd_buffer->state.dirty_descriptor_stages &
2110           VK_SHADER_STAGE_GEOMETRY_BIT);
2111 
2112       const bool has_new_push_constants_gs =
2113          has_new_push_constants &&
2114          (cmd_buffer->state.dirty_push_constants_stages &
2115           VK_SHADER_STAGE_GEOMETRY_BIT);
2116 
2117       const bool needs_gs_update = has_new_viewport ||
2118                                    has_new_view_index ||
2119                                    has_new_pipeline ||
2120                                    has_new_push_constants_gs ||
2121                                    has_new_descriptors_gs;
2122 
2123       if (needs_gs_update) {
2124          struct v3dv_shader_variant *gs_variant =
2125             pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
2126 
2127          struct v3dv_shader_variant *gs_bin_variant =
2128             pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
2129 
2130          cmd_buffer->state.uniforms.gs =
2131             v3dv_write_uniforms(cmd_buffer, pipeline, gs_variant);
2132 
2133          cmd_buffer->state.uniforms.gs_bin =
2134             v3dv_write_uniforms(cmd_buffer, pipeline, gs_bin_variant);
2135       }
2136    }
2137 
2138    /* VK_SHADER_STAGE_VERTEX_BIT */
2139    const bool has_new_descriptors_vs =
2140       has_new_descriptors &&
2141       (cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_VERTEX_BIT);
2142 
2143    const bool has_new_push_constants_vs =
2144       has_new_push_constants &&
2145       (cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_VERTEX_BIT);
2146 
2147    const bool needs_vs_update = has_new_viewport ||
2148                                 has_new_view_index ||
2149                                 has_new_pipeline ||
2150                                 has_new_push_constants_vs ||
2151                                 has_new_descriptors_vs;
2152 
2153    if (needs_vs_update) {
2154       struct v3dv_shader_variant *vs_variant =
2155          pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
2156 
2157       struct v3dv_shader_variant *vs_bin_variant =
2158          pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
2159 
2160       cmd_buffer->state.uniforms.vs =
2161          v3dv_write_uniforms(cmd_buffer, pipeline, vs_variant);
2162 
2163       cmd_buffer->state.uniforms.vs_bin =
2164          v3dv_write_uniforms(cmd_buffer, pipeline, vs_bin_variant);
2165    }
2166 
2167    cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEW_INDEX;
2168 }
2169 
2170 /* This stores command buffer state that we might be about to stomp for
2171  * a meta operation.
2172  */
2173 void
2174 v3dv_cmd_buffer_meta_state_push(struct v3dv_cmd_buffer *cmd_buffer,
2175                                 bool push_descriptor_state)
2176 {
2177    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2178 
2179    if (state->subpass_idx != -1) {
2180       state->meta.subpass_idx = state->subpass_idx;
2181       state->meta.framebuffer = v3dv_framebuffer_to_handle(state->framebuffer);
2182       state->meta.pass = v3dv_render_pass_to_handle(state->pass);
2183 
2184       const uint32_t attachment_state_item_size =
2185          sizeof(struct v3dv_cmd_buffer_attachment_state);
2186       const uint32_t attachment_state_total_size =
2187          attachment_state_item_size * state->attachment_alloc_count;
2188       if (state->meta.attachment_alloc_count < state->attachment_alloc_count) {
2189          if (state->meta.attachment_alloc_count > 0)
2190             vk_free(&cmd_buffer->device->vk.alloc, state->meta.attachments);
2191 
2192          state->meta.attachments = vk_zalloc(&cmd_buffer->device->vk.alloc,
2193                                              attachment_state_total_size, 8,
2194                                              VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2195          if (!state->meta.attachments) {
2196             v3dv_flag_oom(cmd_buffer, NULL);
2197             return;
2198          }
2199          state->meta.attachment_alloc_count = state->attachment_alloc_count;
2200       }
2201       state->meta.attachment_count = state->attachment_alloc_count;
2202       memcpy(state->meta.attachments, state->attachments,
2203              attachment_state_total_size);
2204 
2205       state->meta.tile_aligned_render_area = state->tile_aligned_render_area;
2206       memcpy(&state->meta.render_area, &state->render_area, sizeof(VkRect2D));
2207    }
2208 
2209    /* We expect that meta operations are graphics-only, so we only take into
2210     * account the graphics pipeline and the graphics state.
2211     */
2212    state->meta.gfx.pipeline = state->gfx.pipeline;
2213    memcpy(&state->meta.dynamic, &state->dynamic, sizeof(state->dynamic));
2214 
2215    struct v3dv_descriptor_state *gfx_descriptor_state =
2216       &cmd_buffer->state.gfx.descriptor_state;
2217 
2218    if (push_descriptor_state) {
2219       if (gfx_descriptor_state->valid != 0) {
2220          memcpy(&state->meta.gfx.descriptor_state, gfx_descriptor_state,
2221                 sizeof(state->gfx.descriptor_state));
2222       }
2223       state->meta.has_descriptor_state = true;
2224    } else {
2225       state->meta.has_descriptor_state = false;
2226    }
2227 
2228    /* FIXME: if we keep track of whether we have bound any push constant state
2229     *        at all we could restrict this only to cases where it is actually
2230     *        necessary.
2231     */
2232    memcpy(state->meta.push_constants, cmd_buffer->push_constants_data,
2233           sizeof(state->meta.push_constants));
2234 }
2235 
2236 /* This restores command buffer state after a meta operation
2237  */
2238 void
2239 v3dv_cmd_buffer_meta_state_pop(struct v3dv_cmd_buffer *cmd_buffer,
2240                                uint32_t dirty_dynamic_state,
2241                                bool needs_subpass_resume)
2242 {
2243    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2244 
2245    if (state->meta.subpass_idx != -1) {
2246       state->pass = v3dv_render_pass_from_handle(state->meta.pass);
2247       state->framebuffer = v3dv_framebuffer_from_handle(state->meta.framebuffer);
2248 
2249       assert(state->meta.attachment_count <= state->attachment_alloc_count);
2250       const uint32_t attachment_state_item_size =
2251          sizeof(struct v3dv_cmd_buffer_attachment_state);
2252       const uint32_t attachment_state_total_size =
2253          attachment_state_item_size * state->meta.attachment_count;
2254       memcpy(state->attachments, state->meta.attachments,
2255              attachment_state_total_size);
2256 
2257       state->tile_aligned_render_area = state->meta.tile_aligned_render_area;
2258       memcpy(&state->render_area, &state->meta.render_area, sizeof(VkRect2D));
2259 
2260       /* If needs_subpass_resume is true it means that we emitted the meta
2261        * operation in its own job (possibly with an RT config that is
2262        * incompatible with the current subpass), so resuming subpass execution
2263        * after it requires that we create a new job with the subpass RT setup.
2264        */
2265       if (needs_subpass_resume)
2266          v3dv_cmd_buffer_subpass_resume(cmd_buffer, state->meta.subpass_idx);
2267    } else {
2268       state->subpass_idx = -1;
2269    }
2270 
2271    if (state->meta.gfx.pipeline != NULL) {
2272       struct v3dv_pipeline *pipeline = state->meta.gfx.pipeline;
2273       VkPipelineBindPoint pipeline_binding =
2274          v3dv_pipeline_get_binding_point(pipeline);
2275       v3dv_CmdBindPipeline(v3dv_cmd_buffer_to_handle(cmd_buffer),
2276                            pipeline_binding,
2277                            v3dv_pipeline_to_handle(state->meta.gfx.pipeline));
2278    } else {
2279       state->gfx.pipeline = NULL;
2280    }
2281 
2282    if (dirty_dynamic_state) {
2283       memcpy(&state->dynamic, &state->meta.dynamic, sizeof(state->dynamic));
2284       state->dirty |= dirty_dynamic_state;
2285    }
2286 
2287    if (state->meta.has_descriptor_state) {
2288       if (state->meta.gfx.descriptor_state.valid != 0) {
2289          memcpy(&state->gfx.descriptor_state, &state->meta.gfx.descriptor_state,
2290                 sizeof(state->gfx.descriptor_state));
2291       } else {
2292          state->gfx.descriptor_state.valid = 0;
2293       }
2294    }
2295 
2296    memcpy(cmd_buffer->push_constants_data, state->meta.push_constants,
2297           sizeof(state->meta.push_constants));
2298 
2299    state->meta.gfx.pipeline = NULL;
2300    state->meta.framebuffer = VK_NULL_HANDLE;
2301    state->meta.pass = VK_NULL_HANDLE;
2302    state->meta.subpass_idx = -1;
2303    state->meta.has_descriptor_state = false;
2304 }
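
/* Typical calling pattern for the push/pop pair above (a sketch, with the
 * actual meta recording elided): the dirty mask passed to the pop names the
 * dynamic state the meta operation may have clobbered, and
 * needs_subpass_resume is true only if the meta operation was emitted in
 * its own job:
 *
 *    v3dv_cmd_buffer_meta_state_push(cmd_buffer, true);
 *    // ... bind a meta pipeline/descriptors and record the meta draws ...
 *    v3dv_cmd_buffer_meta_state_pop(cmd_buffer,
 *                                   V3DV_CMD_DIRTY_VIEWPORT |
 *                                   V3DV_CMD_DIRTY_SCISSOR,
 *                                   false);
 */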
2305 
2306 static struct v3dv_job *
2307 cmd_buffer_pre_draw_split_job(struct v3dv_cmd_buffer *cmd_buffer)
2308 {
2309    struct v3dv_job *job = cmd_buffer->state.job;
2310    assert(job);
2311 
2312    /* If the job has been flagged with 'always_flush' and it has already
2313     * recorded any draw calls then we need to start a new job for it.
2314     */
2315    if (job->always_flush && job->draw_count > 0) {
2316       assert(cmd_buffer->state.pass);
2317       /* First, flag the current job as not being the last in the
2318        * current subpass
2319        */
2320       job->is_subpass_finish = false;
2321 
2322       /* Now start a new job in the same subpass and flag it as continuing
2323        * the current subpass.
2324        */
2325       job = v3dv_cmd_buffer_subpass_resume(cmd_buffer,
2326                                            cmd_buffer->state.subpass_idx);
2327       assert(job->draw_count == 0);
2328 
2329       /* Inherit the 'always flush' behavior */
2330       job->always_flush = true;
2331    }
2332 
2333    assert(job->draw_count == 0 || !job->always_flush);
2334    return job;
2335 }
2336 
2337 /**
2338  * The Vulkan spec states:
2339  *
2340  *   "It is legal for a subpass to use no color or depth/stencil
2341  *    attachments (...)  This kind of subpass can use shader side effects such
2342  *    as image stores and atomics to produce an output. In this case, the
2343  *    subpass continues to use the width, height, and layers of the framebuffer
2344  *    to define the dimensions of the rendering area, and the
2345  *    rasterizationSamples from each pipeline’s
2346  *    VkPipelineMultisampleStateCreateInfo to define the number of samples used
2347  *    in rasterization."
2348  *
2349  * We need to enable MSAA in the TILE_BINNING_MODE_CFG packet, which we
2350  * emit when we start a new frame at the beginning of a subpass. At that point,
2351  * if the framebuffer doesn't have any attachments we won't enable MSAA and
2352  * the job won't be valid in the scenario described by the spec.
2353  *
2354  * This function is intended to be called before a draw call and will test if
2355  * we are in that scenario, in which case, it will restart the current job
2356  * with MSAA enabled.
2357  */
2358 static void
2359 cmd_buffer_restart_job_for_msaa_if_needed(struct v3dv_cmd_buffer *cmd_buffer)
2360 {
2361    assert(cmd_buffer->state.job);
2362 
2363    /* We don't support variableMultisampleRate so we know that all pipelines
2364     * bound in the same subpass must have matching number of samples, so we
2365     * can do this check only on the first draw call.
2366     */
2367    if (cmd_buffer->state.job->draw_count > 0)
2368       return;
2369 
2370    /* We only need to restart the frame if the pipeline requires MSAA but
2371     * our frame tiling didn't enable it.
2372     */
2373    if (!cmd_buffer->state.gfx.pipeline->msaa ||
2374        cmd_buffer->state.job->frame_tiling.msaa) {
2375       return;
2376    }
2377 
2378    /* FIXME: Secondary command buffers don't start frames. Instead, they are
2379     * recorded into primary jobs that start them. For secondaries, we should
2380     * still handle this scenario, but we should do that when we record them
2381     * into primaries by testing if any of the secondaries has multisampled
2382     * draw calls in them, and then using that info to decide if we need to
2383     * restart the primary job into which they are being recorded.
2384     */
2385    if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
2386       return;
2387 
2388    /* Drop the current job and restart it with MSAA enabled */
2389    struct v3dv_job *old_job = cmd_buffer->state.job;
2390    cmd_buffer->state.job = NULL;
2391 
2392    struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
2393                                     sizeof(struct v3dv_job), 8,
2394                                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2395    if (!job) {
2396       v3dv_flag_oom(cmd_buffer, NULL);
2397       return;
2398    }
2399 
2400    v3dv_job_init(job, V3DV_JOB_TYPE_GPU_CL, cmd_buffer->device, cmd_buffer,
2401                  cmd_buffer->state.subpass_idx);
2402    cmd_buffer->state.job = job;
2403 
2404    v3dv_job_start_frame(job,
2405                         old_job->frame_tiling.width,
2406                         old_job->frame_tiling.height,
2407                         old_job->frame_tiling.layers,
2408                         true,
2409                         old_job->frame_tiling.render_target_count,
2410                         old_job->frame_tiling.internal_bpp,
2411                         true /* msaa */);
2412 
2413    v3dv_job_destroy(old_job);
2414 }
2415 
2416 void
2417 v3dv_cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
2418 {
2419    assert(cmd_buffer->state.gfx.pipeline);
2420    assert(!(cmd_buffer->state.gfx.pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
2421 
2422    /* If we emitted a pipeline barrier right before this draw we won't have
2423     * an active job. In that case, create a new job continuing the current
2424     * subpass.
2425     */
2426    if (!cmd_buffer->state.job) {
2427       v3dv_cmd_buffer_subpass_resume(cmd_buffer,
2428                                      cmd_buffer->state.subpass_idx);
2429    }
2430 
2431    /* Restart single sample job for MSAA pipeline if needed */
2432    cmd_buffer_restart_job_for_msaa_if_needed(cmd_buffer);
2433 
2434    /* If the job is configured to flush on every draw call we need to create
2435     * a new job now.
2436     */
2437    struct v3dv_job *job = cmd_buffer_pre_draw_split_job(cmd_buffer);
2438    job->draw_count++;
2439 
2440    /* GL shader state binds shaders, uniform and vertex attribute state. The
2441     * compiler injects uniforms to handle some descriptor types (such as
2442     * textures), so we need to regen that when descriptor state changes.
2443     *
2444     * We also need to emit new shader state if we have a dirty viewport since
2445     * that will require that we emit new uniform state for QUNIFORM_VIEWPORT_*.
2446     */
2447    uint32_t *dirty = &cmd_buffer->state.dirty;
2448 
2449    const uint32_t dirty_uniform_state =
2450       *dirty & (V3DV_CMD_DIRTY_PIPELINE |
2451                 V3DV_CMD_DIRTY_PUSH_CONSTANTS |
2452                 V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
2453                 V3DV_CMD_DIRTY_VIEWPORT |
2454                 V3DV_CMD_DIRTY_VIEW_INDEX);
2455 
2456    if (dirty_uniform_state)
2457       update_gfx_uniform_state(cmd_buffer, dirty_uniform_state);
2458 
2459    struct v3dv_device *device = cmd_buffer->device;
2460 
2461    if (dirty_uniform_state || (*dirty & V3DV_CMD_DIRTY_VERTEX_BUFFER))
2462       v3dv_X(device, cmd_buffer_emit_gl_shader_state)(cmd_buffer);
2463 
2464    if (*dirty & (V3DV_CMD_DIRTY_PIPELINE)) {
2465       v3dv_X(device, cmd_buffer_emit_configuration_bits)(cmd_buffer);
2466       v3dv_X(device, cmd_buffer_emit_varyings_state)(cmd_buffer);
2467    }
2468 
2469    if (*dirty & (V3DV_CMD_DIRTY_VIEWPORT | V3DV_CMD_DIRTY_SCISSOR)) {
2470       emit_scissor(cmd_buffer);
2471    }
2472 
2473    if (*dirty & V3DV_CMD_DIRTY_VIEWPORT) {
2474       v3dv_X(device, cmd_buffer_emit_viewport)(cmd_buffer);
2475    }
2476 
2477    if (*dirty & V3DV_CMD_DIRTY_INDEX_BUFFER)
2478       v3dv_X(device, cmd_buffer_emit_index_buffer)(cmd_buffer);
2479 
2480    const uint32_t dynamic_stencil_dirty_flags =
2481       V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
2482       V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
2483       V3DV_CMD_DIRTY_STENCIL_REFERENCE;
2484    if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | dynamic_stencil_dirty_flags))
2485       v3dv_X(device, cmd_buffer_emit_stencil)(cmd_buffer);
2486 
2487    if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_DEPTH_BIAS))
2488       v3dv_X(device, cmd_buffer_emit_depth_bias)(cmd_buffer);
2489 
2490    if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_BLEND_CONSTANTS))
2491       v3dv_X(device, cmd_buffer_emit_blend)(cmd_buffer);
2492 
2493    if (*dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY)
2494       v3dv_X(device, cmd_buffer_emit_occlusion_query)(cmd_buffer);
2495 
2496    if (*dirty & V3DV_CMD_DIRTY_LINE_WIDTH)
2497       v3dv_X(device, cmd_buffer_emit_line_width)(cmd_buffer);
2498 
2499    if (*dirty & V3DV_CMD_DIRTY_PIPELINE)
2500       v3dv_X(device, cmd_buffer_emit_sample_state)(cmd_buffer);
2501 
2502    if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE))
2503       v3dv_X(device, cmd_buffer_emit_color_write_mask)(cmd_buffer);
2504 
2505    cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_PIPELINE;
2506 }
2507 
2508 static inline void
2509 cmd_buffer_set_view_index(struct v3dv_cmd_buffer *cmd_buffer,
2510                           uint32_t view_index)
2511 {
2512    cmd_buffer->state.view_index = view_index;
2513    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VIEW_INDEX;
2514 }
2515 
2516 static void
2517 cmd_buffer_draw(struct v3dv_cmd_buffer *cmd_buffer,
2518                 struct v3dv_draw_info *info)
2519 {
2520 
2521    struct v3dv_render_pass *pass = cmd_buffer->state.pass;
2522    if (likely(!pass->multiview_enabled)) {
2523       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2524       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
2525       return;
2526    }
2527 
2528    uint32_t view_mask = pass->subpasses[cmd_buffer->state.subpass_idx].view_mask;
2529    while (view_mask) {
2530       cmd_buffer_set_view_index(cmd_buffer, u_bit_scan(&view_mask));
2531       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2532       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
2533    }
2534 }
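
/* Worked example for the multiview loop above: u_bit_scan() returns the
 * index of the lowest set bit and clears it from the mask, so a subpass
 * with view_mask = 0x5 (views 0 and 2) records the draw twice:
 *
 *    1st iteration: view_index = 0, remaining mask = 0x4
 *    2nd iteration: view_index = 2, remaining mask = 0x0
 *
 * v3dv_cmd_buffer_emit_pre_draw() runs again on every iteration because
 * setting the view index raises V3DV_CMD_DIRTY_VIEW_INDEX, which forces the
 * per-view uniform streams to be re-emitted.
 */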
2535 
2536 VKAPI_ATTR void VKAPI_CALL
2537 v3dv_CmdDraw(VkCommandBuffer commandBuffer,
2538              uint32_t vertexCount,
2539              uint32_t instanceCount,
2540              uint32_t firstVertex,
2541              uint32_t firstInstance)
2542 {
2543    if (vertexCount == 0 || instanceCount == 0)
2544       return;
2545 
2546    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2547    struct v3dv_draw_info info = {};
2548    info.vertex_count = vertexCount;
2549    info.instance_count = instanceCount;
2550    info.first_instance = firstInstance;
2551    info.first_vertex = firstVertex;
2552 
2553    cmd_buffer_draw(cmd_buffer, &info);
2554 }
2555 
2556 VKAPI_ATTR void VKAPI_CALL
2557 v3dv_CmdDrawIndexed(VkCommandBuffer commandBuffer,
2558                     uint32_t indexCount,
2559                     uint32_t instanceCount,
2560                     uint32_t firstIndex,
2561                     int32_t vertexOffset,
2562                     uint32_t firstInstance)
2563 {
2564    if (indexCount == 0 || instanceCount == 0)
2565       return;
2566 
2567    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2568 
2569    struct v3dv_render_pass *pass = cmd_buffer->state.pass;
2570    if (likely(!pass->multiview_enabled)) {
2571       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2572       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indexed)
2573          (cmd_buffer, indexCount, instanceCount,
2574           firstIndex, vertexOffset, firstInstance);
2575       return;
2576    }
2577 
2578    uint32_t view_mask = pass->subpasses[cmd_buffer->state.subpass_idx].view_mask;
2579    while (view_mask) {
2580       cmd_buffer_set_view_index(cmd_buffer, u_bit_scan(&view_mask));
2581       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2582       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indexed)
2583          (cmd_buffer, indexCount, instanceCount,
2584           firstIndex, vertexOffset, firstInstance);
2585    }
2586 }
2587 
2588 VKAPI_ATTR void VKAPI_CALL
2589 v3dv_CmdDrawIndirect(VkCommandBuffer commandBuffer,
2590                      VkBuffer _buffer,
2591                      VkDeviceSize offset,
2592                      uint32_t drawCount,
2593                      uint32_t stride)
2594 {
2595    /* drawCount is the number of draws to execute, and can be zero. */
2596    if (drawCount == 0)
2597       return;
2598 
2599    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2600    V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
2601 
2602    struct v3dv_render_pass *pass = cmd_buffer->state.pass;
2603    if (likely(!pass->multiview_enabled)) {
2604       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2605       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indirect)
2606          (cmd_buffer, buffer, offset, drawCount, stride);
2607       return;
2608    }
2609 
2610    uint32_t view_mask = pass->subpasses[cmd_buffer->state.subpass_idx].view_mask;
2611    while (view_mask) {
2612       cmd_buffer_set_view_index(cmd_buffer, u_bit_scan(&view_mask));
2613       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2614       v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw_indirect)
2615          (cmd_buffer, buffer, offset, drawCount, stride);
2616    }
2617 }
2618 
2619 VKAPI_ATTR void VKAPI_CALL
2620 v3dv_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
2621                             VkBuffer _buffer,
2622                             VkDeviceSize offset,
2623                             uint32_t drawCount,
2624                             uint32_t stride)
2625 {
2626    /* drawCount is the number of draws to execute, and can be zero. */
2627    if (drawCount == 0)
2628       return;
2629 
2630    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2631    V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
2632 
2633    struct v3dv_render_pass *pass = cmd_buffer->state.pass;
2634    if (likely(!pass->multiview_enabled)) {
2635       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2636       v3dv_X(cmd_buffer->device, cmd_buffer_emit_indexed_indirect)
2637          (cmd_buffer, buffer, offset, drawCount, stride);
2638       return;
2639    }
2640 
2641    uint32_t view_mask = pass->subpasses[cmd_buffer->state.subpass_idx].view_mask;
2642    while (view_mask) {
2643       cmd_buffer_set_view_index(cmd_buffer, u_bit_scan(&view_mask));
2644       v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);
2645       v3dv_X(cmd_buffer->device, cmd_buffer_emit_indexed_indirect)
2646          (cmd_buffer, buffer, offset, drawCount, stride);
2647    }
2648 }
2649 
2650 VKAPI_ATTR void VKAPI_CALL
2651 v3dv_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
2652                         VkPipelineStageFlags srcStageMask,
2653                         VkPipelineStageFlags dstStageMask,
2654                         VkDependencyFlags dependencyFlags,
2655                         uint32_t memoryBarrierCount,
2656                         const VkMemoryBarrier *pMemoryBarriers,
2657                         uint32_t bufferBarrierCount,
2658                         const VkBufferMemoryBarrier *pBufferBarriers,
2659                         uint32_t imageBarrierCount,
2660                         const VkImageMemoryBarrier *pImageBarriers)
2661 {
2662    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2663 
2664    /* We only care about barriers between GPU jobs */
2665    if (srcStageMask == VK_PIPELINE_STAGE_HOST_BIT ||
2666        dstStageMask == VK_PIPELINE_STAGE_HOST_BIT) {
2667       return;
2668    }
2669 
2670    /* If we have a recording job, finish it here */
2671    struct v3dv_job *job = cmd_buffer->state.job;
2672    if (job)
2673       v3dv_cmd_buffer_finish_job(cmd_buffer);
2674 
2675    cmd_buffer->state.has_barrier = true;
2676    if (dstStageMask & (VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
2677                        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
2678                        VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
2679                        VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
2680                        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
2681                        VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT)) {
2682       cmd_buffer->state.has_bcl_barrier = true;
2683    }
2684 }
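
/* Application-side sketch (hypothetical usage, not driver code): a barrier
 * whose destination stage is consumed by the binner, e.g. vertex input after
 * a transfer write. The implementation above records this by setting both
 * has_barrier and has_bcl_barrier.
 */
#if 0
static void example_record_binner_barrier(VkCommandBuffer cmd)
{
   const VkMemoryBarrier barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
      .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
      .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
   };
   vkCmdPipelineBarrier(cmd,
                        VK_PIPELINE_STAGE_TRANSFER_BIT,     /* srcStageMask */
                        VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, /* dstStageMask */
                        0, 1, &barrier, 0, NULL, 0, NULL);
}
#endif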
2685 
2686 VKAPI_ATTR void VKAPI_CALL
2687 v3dv_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
2688                           uint32_t firstBinding,
2689                           uint32_t bindingCount,
2690                           const VkBuffer *pBuffers,
2691                           const VkDeviceSize *pOffsets)
2692 {
2693    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2694    struct v3dv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
2695 
2696    /* We have to defer setting up the vertex buffers since we need the
2697     * buffer stride from the pipeline.
2698     */
2699 
2700    assert(firstBinding + bindingCount <= MAX_VBS);
2701    bool vb_state_changed = false;
2702    for (uint32_t i = 0; i < bindingCount; i++) {
2703       if (vb[firstBinding + i].buffer != v3dv_buffer_from_handle(pBuffers[i])) {
2704          vb[firstBinding + i].buffer = v3dv_buffer_from_handle(pBuffers[i]);
2705          vb_state_changed = true;
2706       }
2707       if (vb[firstBinding + i].offset != pOffsets[i]) {
2708          vb[firstBinding + i].offset = pOffsets[i];
2709          vb_state_changed = true;
2710       }
2711    }
2712 
2713    if (vb_state_changed)
2714       cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VERTEX_BUFFER;
2715 }
2716 
2717 static uint32_t
2718 get_index_size(VkIndexType index_type)
2719 {
2720    switch (index_type) {
2721    case VK_INDEX_TYPE_UINT8_EXT:
2722       return 1;
2723       break;
2724    case VK_INDEX_TYPE_UINT16:
2725       return 2;
2726       break;
2727    case VK_INDEX_TYPE_UINT32:
2728       return 4;
2729       break;
2730    default:
2731       unreachable("Unsupported index type");
2732    }
2733 }
2734 
2735 VKAPI_ATTR void VKAPI_CALL
2736 v3dv_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
2737                         VkBuffer buffer,
2738                         VkDeviceSize offset,
2739                         VkIndexType indexType)
2740 {
2741    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2742 
2743    const uint32_t index_size = get_index_size(indexType);
2744    if (buffer == cmd_buffer->state.index_buffer.buffer &&
2745        offset == cmd_buffer->state.index_buffer.offset &&
2746        index_size == cmd_buffer->state.index_buffer.index_size) {
2747       return;
2748    }
2749 
2750    cmd_buffer->state.index_buffer.buffer = buffer;
2751    cmd_buffer->state.index_buffer.offset = offset;
2752    cmd_buffer->state.index_buffer.index_size = index_size;
2753    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_INDEX_BUFFER;
2754 }
2755 
2756 VKAPI_ATTR void VKAPI_CALL
2757 v3dv_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2758                               VkStencilFaceFlags faceMask,
2759                               uint32_t compareMask)
2760 {
2761    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2762 
2763    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2764       cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask & 0xff;
2765    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2766       cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask & 0xff;
2767 
2768    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK;
2769 }
2770 
2771 VKAPI_ATTR void VKAPI_CALL
2772 v3dv_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2773                             VkStencilFaceFlags faceMask,
2774                             uint32_t writeMask)
2775 {
2776    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2777 
2778    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2779       cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask & 0xff;
2780    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2781       cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask & 0xff;
2782 
2783    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_WRITE_MASK;
2784 }
2785 
2786 VKAPI_ATTR void VKAPI_CALL
2787 v3dv_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2788                             VkStencilFaceFlags faceMask,
2789                             uint32_t reference)
2790 {
2791    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2792 
2793    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2794       cmd_buffer->state.dynamic.stencil_reference.front = reference & 0xff;
2795    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2796       cmd_buffer->state.dynamic.stencil_reference.back = reference & 0xff;
2797 
2798    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_STENCIL_REFERENCE;
2799 }
2800 
2801 VKAPI_ATTR void VKAPI_CALL
2802 v3dv_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2803                      float depthBiasConstantFactor,
2804                      float depthBiasClamp,
2805                      float depthBiasSlopeFactor)
2806 {
2807    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2808 
2809    cmd_buffer->state.dynamic.depth_bias.constant_factor = depthBiasConstantFactor;
2810    cmd_buffer->state.dynamic.depth_bias.depth_bias_clamp = depthBiasClamp;
2811    cmd_buffer->state.dynamic.depth_bias.slope_factor = depthBiasSlopeFactor;
2812    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_DEPTH_BIAS;
2813 }
2814 
2815 VKAPI_ATTR void VKAPI_CALL
2816 v3dv_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2817                        float minDepthBounds,
2818                        float maxDepthBounds)
2819 {
2820    /* We do not support depth bounds testing so we just ignore this. We are
2821     * already asserting that pipelines don't enable the feature anyway.
2822     */
2823 }
2824 
2825 VKAPI_ATTR void VKAPI_CALL
2826 v3dv_CmdSetLineStippleEXT(VkCommandBuffer commandBuffer,
2827                           uint32_t lineStippleFactor,
2828                           uint16_t lineStipplePattern)
2829 {
2830    /* We do not support stippled line rasterization so we just ignore this. */
2831 }
2832 
2833 VKAPI_ATTR void VKAPI_CALL
2834 v3dv_CmdSetLineWidth(VkCommandBuffer commandBuffer,
2835                      float lineWidth)
2836 {
2837    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2838 
2839    cmd_buffer->state.dynamic.line_width = lineWidth;
2840    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_LINE_WIDTH;
2841 }
2842 
2843 VKAPI_ATTR void VKAPI_CALL
2844 v3dv_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
2845                            VkPipelineBindPoint pipelineBindPoint,
2846                            VkPipelineLayout _layout,
2847                            uint32_t firstSet,
2848                            uint32_t descriptorSetCount,
2849                            const VkDescriptorSet *pDescriptorSets,
2850                            uint32_t dynamicOffsetCount,
2851                            const uint32_t *pDynamicOffsets)
2852 {
2853    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2854    V3DV_FROM_HANDLE(v3dv_pipeline_layout, layout, _layout);
2855 
2856    uint32_t dyn_index = 0;
2857 
2858    assert(firstSet + descriptorSetCount <= MAX_SETS);
2859 
2860    struct v3dv_descriptor_state *descriptor_state =
2861       pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE ?
2862       &cmd_buffer->state.compute.descriptor_state :
2863       &cmd_buffer->state.gfx.descriptor_state;
2864 
2865    VkShaderStageFlags dirty_stages = 0;
2866    bool descriptor_state_changed = false;
2867    for (uint32_t i = 0; i < descriptorSetCount; i++) {
2868       V3DV_FROM_HANDLE(v3dv_descriptor_set, set, pDescriptorSets[i]);
2869       uint32_t index = firstSet + i;
2870 
2871       descriptor_state->valid |= (1u << index);
2872       if (descriptor_state->descriptor_sets[index] != set) {
2873          descriptor_state->descriptor_sets[index] = set;
2874          dirty_stages |= set->layout->shader_stages;
2875          descriptor_state_changed = true;
2876       }
2877 
2878       for (uint32_t j = 0; j < set->layout->dynamic_offset_count; j++, dyn_index++) {
2879          uint32_t idx = j + layout->set[i + firstSet].dynamic_offset_start;
2880 
2881          if (descriptor_state->dynamic_offsets[idx] != pDynamicOffsets[dyn_index]) {
2882             descriptor_state->dynamic_offsets[idx] = pDynamicOffsets[dyn_index];
2883             dirty_stages |= set->layout->shader_stages;
2884             descriptor_state_changed = true;
2885          }
2886       }
2887    }
2888 
2889    if (descriptor_state_changed) {
2890       if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
2891          cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
2892          cmd_buffer->state.dirty_descriptor_stages |= dirty_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
2893       } else {
2894          cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
2895          cmd_buffer->state.dirty_descriptor_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2896       }
2897    }
2898 }
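
/* Worked example (layout assumed for illustration): if set 0 declares two
 * dynamic buffers and set 1 declares one, the pipeline layout flattens the
 * dynamic offsets as
 *
 *    set 0: dynamic_offset_start = 0 -> dynamic_offsets[0], [1]
 *    set 1: dynamic_offset_start = 2 -> dynamic_offsets[2]
 *
 * so pDynamicOffsets[] is consumed in set/binding order exactly as dyn_index
 * advances in the loop above.
 */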
2899 
2900 VKAPI_ATTR void VKAPI_CALL
2901 v3dv_CmdPushConstants(VkCommandBuffer commandBuffer,
2902                       VkPipelineLayout layout,
2903                       VkShaderStageFlags stageFlags,
2904                       uint32_t offset,
2905                       uint32_t size,
2906                       const void *pValues)
2907 {
2908    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2909 
2910    if (!memcmp((uint8_t *) cmd_buffer->push_constants_data + offset, pValues, size))
2911       return;
2912 
2913    memcpy((uint8_t *) cmd_buffer->push_constants_data + offset, pValues, size);
2914 
2915    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PUSH_CONSTANTS;
2916    cmd_buffer->state.dirty_push_constants_stages |= stageFlags;
2917 }
2918 
2919 VKAPI_ATTR void VKAPI_CALL
2920 v3dv_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2921                           const float blendConstants[4])
2922 {
2923    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2924    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2925 
2926    if (!memcmp(state->dynamic.blend_constants, blendConstants,
2927                sizeof(state->dynamic.blend_constants))) {
2928       return;
2929    }
2930 
2931    memcpy(state->dynamic.blend_constants, blendConstants,
2932           sizeof(state->dynamic.blend_constants));
2933 
2934    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_BLEND_CONSTANTS;
2935 }
2936 
2937 VKAPI_ATTR void VKAPI_CALL
2938 v3dv_CmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer,
2939                                uint32_t attachmentCount,
2940                                const VkBool32 *pColorWriteEnables)
2941 {
2942    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
2943    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2944    uint32_t color_write_enable = 0;
2945 
2946    for (uint32_t i = 0; i < attachmentCount; i++)
2947       color_write_enable |= pColorWriteEnables[i] ? (0xfu << (i * 4)) : 0;
2948 
2949    if (state->dynamic.color_write_enable == color_write_enable)
2950       return;
2951 
2952    state->dynamic.color_write_enable = color_write_enable;
2953 
2954    state->dirty |= V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE;
2955 }
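
/* Worked example (attachment states assumed for illustration): with three
 * attachments whose writes are enabled, disabled and enabled respectively,
 * the loop above packs one 4-bit RGBA mask per attachment:
 *
 *    i = 0, VK_TRUE  -> 0xf << 0 = 0x00f
 *    i = 1, VK_FALSE -> 0
 *    i = 2, VK_TRUE  -> 0xf << 8 = 0xf00
 *
 *    color_write_enable = 0xf0f
 */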
2956 
2957 void
2958 v3dv_cmd_buffer_reset_queries(struct v3dv_cmd_buffer *cmd_buffer,
2959                               struct v3dv_query_pool *pool,
2960                               uint32_t first,
2961                               uint32_t count)
2962 {
2963    /* Resets can only happen outside a render pass instance so we should not
2964     * be in the middle of job recording.
2965     */
2966    assert(cmd_buffer->state.pass == NULL);
2967    assert(cmd_buffer->state.job == NULL);
2968 
2969    assert(first < pool->query_count);
2970    assert(first + count <= pool->query_count);
2971 
2972    struct v3dv_job *job =
2973       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
2974                                      V3DV_JOB_TYPE_CPU_RESET_QUERIES,
2975                                      cmd_buffer, -1);
2976    v3dv_return_if_oom(cmd_buffer, NULL);
2977 
2978    job->cpu.query_reset.pool = pool;
2979    job->cpu.query_reset.first = first;
2980    job->cpu.query_reset.count = count;
2981 
2982    list_addtail(&job->list_link, &cmd_buffer->jobs);
2983 }
2984 
2985 void
2986 v3dv_cmd_buffer_ensure_array_state(struct v3dv_cmd_buffer *cmd_buffer,
2987                                    uint32_t slot_size,
2988                                    uint32_t used_count,
2989                                    uint32_t *alloc_count,
2990                                    void **ptr)
2991 {
2992    if (used_count >= *alloc_count) {
2993       const uint32_t prev_slot_count = *alloc_count;
2994       void *old_buffer = *ptr;
2995 
2996       const uint32_t new_slot_count = MAX2(*alloc_count * 2, 4);
2997       const uint32_t bytes = new_slot_count * slot_size;
2998       *ptr = vk_alloc(&cmd_buffer->device->vk.alloc, bytes, 8,
2999                       VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3000       if (*ptr == NULL) {
3001          fprintf(stderr, "Error: failed to allocate CPU buffer for query.\n");
3002          v3dv_flag_oom(cmd_buffer, NULL);
3003          return;
3004       }
3005 
3006       memcpy(*ptr, old_buffer, prev_slot_count * slot_size);
3007       *alloc_count = new_slot_count;
3008    }
3009    assert(used_count < *alloc_count);
3010 }
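
/* Growth sketch (sizes assumed for illustration): the array doubles with a
 * minimum of 4 slots, so as used_count keeps hitting the capacity,
 * alloc_count grows 0 -> 4 -> 8 -> 16 -> ..., copying the previously used
 * slots into the new allocation each time.
 */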
3011 
3012 void
3013 v3dv_cmd_buffer_begin_query(struct v3dv_cmd_buffer *cmd_buffer,
3014                             struct v3dv_query_pool *pool,
3015                             uint32_t query,
3016                             VkQueryControlFlags flags)
3017 {
3018    /* FIXME: we only support one active query for now */
3019    assert(cmd_buffer->state.query.active_query.bo == NULL);
3020    assert(query < pool->query_count);
3021 
3022    cmd_buffer->state.query.active_query.bo = pool->queries[query].bo;
3023    cmd_buffer->state.query.active_query.offset = pool->queries[query].offset;
3024    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_OCCLUSION_QUERY;
3025 }
3026 
3027 void
3028 v3dv_cmd_buffer_end_query(struct v3dv_cmd_buffer *cmd_buffer,
3029                           struct v3dv_query_pool *pool,
3030                           uint32_t query)
3031 {
3032    assert(query < pool->query_count);
3033    assert(cmd_buffer->state.query.active_query.bo != NULL);
3034 
3035    if (cmd_buffer->state.pass) {
3036       /* Queue the EndQuery in the command buffer state; we will create a CPU
3037        * job to flag all of these queries as possibly available right after the
3038        * render pass job in which they have been recorded.
3039        */
3040       struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
3041       v3dv_cmd_buffer_ensure_array_state(cmd_buffer,
3042                                          sizeof(struct v3dv_end_query_cpu_job_info),
3043                                          state->query.end.used_count,
3044                                          &state->query.end.alloc_count,
3045                                          (void **) &state->query.end.states);
3046       v3dv_return_if_oom(cmd_buffer, NULL);
3047 
3048       struct v3dv_end_query_cpu_job_info *info =
3049          &state->query.end.states[state->query.end.used_count++];
3050 
3051       info->pool = pool;
3052       info->query = query;
3053 
3054       /* From the Vulkan spec:
3055        *
3056        *   "If queries are used while executing a render pass instance that has
3057        *    multiview enabled, the query uses N consecutive query indices in
3058        *    the query pool (starting at query) where N is the number of bits set
3059        *    in the view mask in the subpass the query is used in. How the
3060        *    numerical results of the query are distributed among the queries is
3061        *    implementation-dependent."
3062        *
3063        * In our case, only the first query is used, but this means we still need
3064        * to flag the other queries as available so we don't emit errors when the
3065        * application attempts to retrieve values from them.
3066        */
3067       struct v3dv_render_pass *pass = cmd_buffer->state.pass;
3068       if (!pass->multiview_enabled) {
3069          info->count = 1;
3070       } else {
3071          struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
3072          info->count = util_bitcount(subpass->view_mask);
3073       }
3074    } else {
3075       /* Otherwise, schedule the CPU job immediately */
3076       struct v3dv_job *job =
3077          v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3078                                         V3DV_JOB_TYPE_CPU_END_QUERY,
3079                                         cmd_buffer, -1);
3080       v3dv_return_if_oom(cmd_buffer, NULL);
3081 
3082       job->cpu.query_end.pool = pool;
3083       job->cpu.query_end.query = query;
3084 
3085       /* Multiview queries cannot cross subpass boundaries */
3086       job->cpu.query_end.count = 1;
3087 
3088       list_addtail(&job->list_link, &cmd_buffer->jobs);
3089    }
3090 
3091    cmd_buffer->state.query.active_query.bo = NULL;
3092    cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_OCCLUSION_QUERY;
3093 }
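
/* Worked example (view mask assumed for illustration): ending a query in a
 * multiview subpass with view_mask = 0x3 gives util_bitcount() = 2, so the
 * CPU job flags queries `query` and `query + 1` as available even though only
 * the first one receives the actual result.
 */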
3094 
3095 void
3096 v3dv_cmd_buffer_copy_query_results(struct v3dv_cmd_buffer *cmd_buffer,
3097                                    struct v3dv_query_pool *pool,
3098                                    uint32_t first,
3099                                    uint32_t count,
3100                                    struct v3dv_buffer *dst,
3101                                    uint32_t offset,
3102                                    uint32_t stride,
3103                                    VkQueryResultFlags flags)
3104 {
3105    /* Copies can only happen outside a render pass instance so we should not
3106     * be in the middle of job recording.
3107     */
3108    assert(cmd_buffer->state.pass == NULL);
3109    assert(cmd_buffer->state.job == NULL);
3110 
3111    assert(first < pool->query_count);
3112    assert(first + count <= pool->query_count);
3113 
3114    struct v3dv_job *job =
3115       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3116                                      V3DV_JOB_TYPE_CPU_COPY_QUERY_RESULTS,
3117                                      cmd_buffer, -1);
3118    v3dv_return_if_oom(cmd_buffer, NULL);
3119 
3120    job->cpu.query_copy_results.pool = pool;
3121    job->cpu.query_copy_results.first = first;
3122    job->cpu.query_copy_results.count = count;
3123    job->cpu.query_copy_results.dst = dst;
3124    job->cpu.query_copy_results.offset = offset;
3125    job->cpu.query_copy_results.stride = stride;
3126    job->cpu.query_copy_results.flags = flags;
3127 
3128    list_addtail(&job->list_link, &cmd_buffer->jobs);
3129 }
3130 
3131 void
3132 v3dv_cmd_buffer_add_tfu_job(struct v3dv_cmd_buffer *cmd_buffer,
3133                             struct drm_v3d_submit_tfu *tfu)
3134 {
3135    struct v3dv_device *device = cmd_buffer->device;
3136    struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
3137                                     sizeof(struct v3dv_job), 8,
3138                                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3139    if (!job) {
3140       v3dv_flag_oom(cmd_buffer, NULL);
3141       return;
3142    }
3143 
3144    v3dv_job_init(job, V3DV_JOB_TYPE_GPU_TFU, device, cmd_buffer, -1);
3145    job->tfu = *tfu;
3146    list_addtail(&job->list_link, &cmd_buffer->jobs);
3147 }
3148 
3149 VKAPI_ATTR void VKAPI_CALL
3150 v3dv_CmdSetEvent(VkCommandBuffer commandBuffer,
3151                  VkEvent _event,
3152                  VkPipelineStageFlags stageMask)
3153 {
3154    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3155    V3DV_FROM_HANDLE(v3dv_event, event, _event);
3156 
3157    /* Event (re)sets can only happen outside a render pass instance so we
3158     * should not be in the middle of job recording.
3159     */
3160    assert(cmd_buffer->state.pass == NULL);
3161    assert(cmd_buffer->state.job == NULL);
3162 
3163    struct v3dv_job *job =
3164       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3165                                      V3DV_JOB_TYPE_CPU_SET_EVENT,
3166                                      cmd_buffer, -1);
3167    v3dv_return_if_oom(cmd_buffer, NULL);
3168 
3169    job->cpu.event_set.event = event;
3170    job->cpu.event_set.state = 1;
3171 
3172    list_addtail(&job->list_link, &cmd_buffer->jobs);
3173 }
3174 
3175 VKAPI_ATTR void VKAPI_CALL
3176 v3dv_CmdResetEvent(VkCommandBuffer commandBuffer,
3177                    VkEvent _event,
3178                    VkPipelineStageFlags stageMask)
3179 {
3180    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3181    V3DV_FROM_HANDLE(v3dv_event, event, _event);
3182 
3183    /* Event (re)sets can only happen outside a render pass instance so we
3184     * should not be in the middle of job recording.
3185     */
3186    assert(cmd_buffer->state.pass == NULL);
3187    assert(cmd_buffer->state.job == NULL);
3188 
3189    struct v3dv_job *job =
3190       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3191                                      V3DV_JOB_TYPE_CPU_SET_EVENT,
3192                                      cmd_buffer, -1);
3193    v3dv_return_if_oom(cmd_buffer, NULL);
3194 
3195    job->cpu.event_set.event = event;
3196    job->cpu.event_set.state = 0;
3197 
3198    list_addtail(&job->list_link, &cmd_buffer->jobs);
3199 }
3200 
3201 VKAPI_ATTR void VKAPI_CALL
3202 v3dv_CmdWaitEvents(VkCommandBuffer commandBuffer,
3203                    uint32_t eventCount,
3204                    const VkEvent *pEvents,
3205                    VkPipelineStageFlags srcStageMask,
3206                    VkPipelineStageFlags dstStageMask,
3207                    uint32_t memoryBarrierCount,
3208                    const VkMemoryBarrier *pMemoryBarriers,
3209                    uint32_t bufferMemoryBarrierCount,
3210                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3211                    uint32_t imageMemoryBarrierCount,
3212                    const VkImageMemoryBarrier *pImageMemoryBarriers)
3213 {
3214    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3215 
3216    assert(eventCount > 0);
3217 
3218    struct v3dv_job *job =
3219       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3220                                      V3DV_JOB_TYPE_CPU_WAIT_EVENTS,
3221                                      cmd_buffer, -1);
3222    v3dv_return_if_oom(cmd_buffer, NULL);
3223 
3224    const uint32_t event_list_size = sizeof(struct v3dv_event *) * eventCount;
3225 
3226    job->cpu.event_wait.events =
3227       vk_alloc(&cmd_buffer->device->vk.alloc, event_list_size, 8,
3228                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3229    if (!job->cpu.event_wait.events) {
3230       v3dv_flag_oom(cmd_buffer, NULL);
3231       return;
3232    }
3233    job->cpu.event_wait.event_count = eventCount;
3234 
3235    for (uint32_t i = 0; i < eventCount; i++)
3236       job->cpu.event_wait.events[i] = v3dv_event_from_handle(pEvents[i]);
3237 
3238    /* vkCmdWaitEvents can be recorded inside a render pass, so we might have
3239     * an active job.
3240     *
3241     * If we are inside a render pass, since vkCmd(Re)SetEvent can't happen
3242     * inside a render pass, it is safe to move the wait job so it happens right
3243     * before the job we are currently recording for the subpass, if any
3244     * (it would actually be safe to move it all the way back to right before
3245     * the start of the render pass).
3246     *
3247     * If we are outside a render pass then we should not have any ongoing job
3248     * and we are free to just add the wait job without restrictions.
3249     */
3250    assert(cmd_buffer->state.pass || !cmd_buffer->state.job);
3251    list_addtail(&job->list_link, &cmd_buffer->jobs);
3252 }
3253 
3254 VKAPI_ATTR void VKAPI_CALL
3255 v3dv_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
3256                        VkPipelineStageFlagBits pipelineStage,
3257                        VkQueryPool queryPool,
3258                        uint32_t query)
3259 {
3260    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3261    V3DV_FROM_HANDLE(v3dv_query_pool, query_pool, queryPool);
3262 
3263    /* If this is called inside a render pass we need to finish the current
3264     * job here...
3265     */
3266    struct v3dv_render_pass *pass = cmd_buffer->state.pass;
3267    if (pass)
3268       v3dv_cmd_buffer_finish_job(cmd_buffer);
3269 
3270    struct v3dv_job *job =
3271       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3272                                      V3DV_JOB_TYPE_CPU_TIMESTAMP_QUERY,
3273                                      cmd_buffer, -1);
3274    v3dv_return_if_oom(cmd_buffer, NULL);
3275 
3276    job->cpu.query_timestamp.pool = query_pool;
3277    job->cpu.query_timestamp.query = query;
3278 
3279    if (!pass || !pass->multiview_enabled) {
3280       job->cpu.query_timestamp.count = 1;
3281    } else {
3282       struct v3dv_subpass *subpass =
3283          &pass->subpasses[cmd_buffer->state.subpass_idx];
3284       job->cpu.query_timestamp.count = util_bitcount(subpass->view_mask);
3285    }
3286 
3287    list_addtail(&job->list_link, &cmd_buffer->jobs);
3288    cmd_buffer->state.job = NULL;
3289 
3290    /* ...and resume the subpass after the timestamp */
3291    if (cmd_buffer->state.pass)
3292       v3dv_cmd_buffer_subpass_resume(cmd_buffer, cmd_buffer->state.subpass_idx);
3293 }
3294 
3295 static void
3296 cmd_buffer_emit_pre_dispatch(struct v3dv_cmd_buffer *cmd_buffer)
3297 {
3298    assert(cmd_buffer->state.compute.pipeline);
3299    assert(cmd_buffer->state.compute.pipeline->active_stages ==
3300           VK_SHADER_STAGE_COMPUTE_BIT);
3301 
3302    cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_COMPUTE_PIPELINE |
3303                                 V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS);
3304    cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_COMPUTE_BIT;
3305    cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_COMPUTE_BIT;
3306 }
3307 
3308 #define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
3309 #define V3D_CSD_CFG012_WG_OFFSET_SHIFT 0
3310 /* Allow this dispatch to start while the last one is still running. */
3311 #define V3D_CSD_CFG3_OVERLAP_WITH_PREV (1 << 26)
3312 /* Maximum supergroup ID.  6 bits. */
3313 #define V3D_CSD_CFG3_MAX_SG_ID_SHIFT 20
3314 /* Batches per supergroup minus 1.  8 bits. */
3315 #define V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT 12
3316 /* Workgroups per supergroup, 0 means 16 */
3317 #define V3D_CSD_CFG3_WGS_PER_SG_SHIFT 8
3318 #define V3D_CSD_CFG3_WG_SIZE_SHIFT 0
3319 
3320 #define V3D_CSD_CFG5_PROPAGATE_NANS (1 << 2)
3321 #define V3D_CSD_CFG5_SINGLE_SEG (1 << 1)
3322 #define V3D_CSD_CFG5_THREADING (1 << 0)
3323 
3324 void
3325 v3dv_cmd_buffer_rewrite_indirect_csd_job(
3326    struct v3dv_csd_indirect_cpu_job_info *info,
3327    const uint32_t *wg_counts)
3328 {
3329    assert(info->csd_job);
3330    struct v3dv_job *job = info->csd_job;
3331 
3332    assert(job->type == V3DV_JOB_TYPE_GPU_CSD);
3333    assert(wg_counts[0] > 0 && wg_counts[1] > 0 && wg_counts[2] > 0);
3334 
3335    struct drm_v3d_submit_csd *submit = &job->csd.submit;
3336 
3337    job->csd.wg_count[0] = wg_counts[0];
3338    job->csd.wg_count[1] = wg_counts[1];
3339    job->csd.wg_count[2] = wg_counts[2];
3340 
3341    submit->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3342    submit->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3343    submit->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3344 
3345    submit->cfg[4] = DIV_ROUND_UP(info->wg_size, 16) *
3346                     (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
3347    assert(submit->cfg[4] != ~0);
3348 
3349    if (info->needs_wg_uniform_rewrite) {
3350       /* Make sure the GPU is not currently accessing the indirect CL for this
3351        * job, since we are about to overwrite some of the uniform data.
3352        */
3353       v3dv_bo_wait(job->device, job->indirect.bo, PIPE_TIMEOUT_INFINITE);
3354 
3355       for (uint32_t i = 0; i < 3; i++) {
3356          if (info->wg_uniform_offsets[i]) {
3357             /* Sanity check that our uniform pointers are within the allocated
3358              * BO space for our indirect CL.
3359              */
3360             assert(info->wg_uniform_offsets[i] >= (uint32_t *) job->indirect.base);
3361             assert(info->wg_uniform_offsets[i] < (uint32_t *) job->indirect.next);
3362             *(info->wg_uniform_offsets[i]) = wg_counts[i];
3363          }
3364       }
3365    }
3366 }
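
/* Worked example (counts assumed for illustration): with wg_size = 64
 * invocations per workgroup and indirect counts {4, 2, 1}, the cfg[4] rewrite
 * above computes
 *
 *    DIV_ROUND_UP(64, 16) * (4 * 2 * 1) - 1 = 4 * 8 - 1 = 31
 *
 * i.e. 32 batches of 16 invocations covering the 8 * 64 = 512 total
 * invocations of the dispatch.
 */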
3367 
3368 static struct v3dv_job *
3369 cmd_buffer_create_csd_job(struct v3dv_cmd_buffer *cmd_buffer,
3370                           uint32_t base_offset_x,
3371                           uint32_t base_offset_y,
3372                           uint32_t base_offset_z,
3373                           uint32_t group_count_x,
3374                           uint32_t group_count_y,
3375                           uint32_t group_count_z,
3376                           uint32_t **wg_uniform_offsets_out,
3377                           uint32_t *wg_size_out)
3378 {
3379    struct v3dv_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
3380    assert(pipeline && pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE]);
3381    struct v3dv_shader_variant *cs_variant =
3382       pipeline->shared_data->variants[BROADCOM_SHADER_COMPUTE];
3383 
3384    struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
3385                                     sizeof(struct v3dv_job), 8,
3386                                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3387    if (!job) {
3388       v3dv_flag_oom(cmd_buffer, NULL);
3389       return NULL;
3390    }
3391 
3392    v3dv_job_init(job, V3DV_JOB_TYPE_GPU_CSD, cmd_buffer->device, cmd_buffer, -1);
3393    cmd_buffer->state.job = job;
3394 
3395    struct drm_v3d_submit_csd *submit = &job->csd.submit;
3396 
3397    job->csd.wg_count[0] = group_count_x;
3398    job->csd.wg_count[1] = group_count_y;
3399    job->csd.wg_count[2] = group_count_z;
3400 
3401    job->csd.wg_base[0] = base_offset_x;
3402    job->csd.wg_base[1] = base_offset_y;
3403    job->csd.wg_base[2] = base_offset_z;
3404 
3405    submit->cfg[0] |= group_count_x << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3406    submit->cfg[1] |= group_count_y << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3407    submit->cfg[2] |= group_count_z << V3D_CSD_CFG012_WG_COUNT_SHIFT;
3408 
3409    const struct v3d_compute_prog_data *cpd =
3410       cs_variant->prog_data.cs;
3411 
3412    const uint32_t num_wgs = group_count_x * group_count_y * group_count_z;
3413    const uint32_t wg_size = cpd->local_size[0] *
3414                             cpd->local_size[1] *
3415                             cpd->local_size[2];
3416 
3417    uint32_t wgs_per_sg =
3418       v3d_csd_choose_workgroups_per_supergroup(
3419          &cmd_buffer->device->devinfo,
3420          cs_variant->prog_data.cs->has_subgroups,
3421          cs_variant->prog_data.cs->base.has_control_barrier,
3422          cs_variant->prog_data.cs->base.threads,
3423          num_wgs, wg_size);
3424 
3425    uint32_t batches_per_sg = DIV_ROUND_UP(wgs_per_sg * wg_size, 16);
3426    uint32_t whole_sgs = num_wgs / wgs_per_sg;
3427    uint32_t rem_wgs = num_wgs - whole_sgs * wgs_per_sg;
3428    uint32_t num_batches = batches_per_sg * whole_sgs +
3429                           DIV_ROUND_UP(rem_wgs * wg_size, 16);
3430 
3431    submit->cfg[3] |= (wgs_per_sg & 0xf) << V3D_CSD_CFG3_WGS_PER_SG_SHIFT;
3432    submit->cfg[3] |= (batches_per_sg - 1) << V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT;
3433    submit->cfg[3] |= (wg_size & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT;
3434    if (wg_size_out)
3435       *wg_size_out = wg_size;
3436 
3437    submit->cfg[4] = num_batches - 1;
3438    assert(submit->cfg[4] != ~0);
3439 
3440    assert(pipeline->shared_data->assembly_bo);
3441    struct v3dv_bo *cs_assembly_bo = pipeline->shared_data->assembly_bo;
3442 
3443    submit->cfg[5] = cs_assembly_bo->offset + cs_variant->assembly_offset;
3444    submit->cfg[5] |= V3D_CSD_CFG5_PROPAGATE_NANS;
3445    if (cs_variant->prog_data.base->single_seg)
3446       submit->cfg[5] |= V3D_CSD_CFG5_SINGLE_SEG;
3447    if (cs_variant->prog_data.base->threads == 4)
3448       submit->cfg[5] |= V3D_CSD_CFG5_THREADING;
3449 
3450    if (cs_variant->prog_data.cs->shared_size > 0) {
3451       job->csd.shared_memory =
3452          v3dv_bo_alloc(cmd_buffer->device,
3453                        cs_variant->prog_data.cs->shared_size * wgs_per_sg,
3454                        "shared_vars", true);
3455       if (!job->csd.shared_memory) {
3456          v3dv_flag_oom(cmd_buffer, NULL);
3457          return job;
3458       }
3459    }
3460 
3461    v3dv_job_add_bo_unchecked(job, cs_assembly_bo);
3462    struct v3dv_cl_reloc uniforms =
3463       v3dv_write_uniforms_wg_offsets(cmd_buffer, pipeline,
3464                                      cs_variant,
3465                                      wg_uniform_offsets_out);
3466    submit->cfg[6] = uniforms.bo->offset + uniforms.offset;
3467 
3468    v3dv_job_add_bo(job, uniforms.bo);
3469 
3470    return job;
3471 }
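
/* Worked example (sizes assumed for illustration): for a 4x4x4 local size
 * (wg_size = 64), num_wgs = 10 and a choice of wgs_per_sg = 3, the math above
 * gives
 *
 *    batches_per_sg = DIV_ROUND_UP(3 * 64, 16) = 12
 *    whole_sgs      = 10 / 3 = 3
 *    rem_wgs        = 10 - 3 * 3 = 1
 *    num_batches    = 12 * 3 + DIV_ROUND_UP(1 * 64, 16) = 36 + 4 = 40
 *
 * so cfg[4] = 39 and the 640 total invocations are covered by 40 batches of
 * 16. cfg[3] then packs wgs_per_sg (3), batches_per_sg - 1 (11) and
 * wg_size (64) into their respective bit fields.
 */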
3472 
3473 static void
3474 cmd_buffer_dispatch(struct v3dv_cmd_buffer *cmd_buffer,
3475                     uint32_t base_offset_x,
3476                     uint32_t base_offset_y,
3477                     uint32_t base_offset_z,
3478                     uint32_t group_count_x,
3479                     uint32_t group_count_y,
3480                     uint32_t group_count_z)
3481 {
3482    if (group_count_x == 0 || group_count_y == 0 || group_count_z == 0)
3483       return;
3484 
3485    struct v3dv_job *job =
3486       cmd_buffer_create_csd_job(cmd_buffer,
3487                                 base_offset_x,
3488                                 base_offset_y,
3489                                 base_offset_z,
3490                                 group_count_x,
3491                                 group_count_y,
3492                                 group_count_z,
3493                                 NULL, NULL);
3494 
3495    list_addtail(&job->list_link, &cmd_buffer->jobs);
3496    cmd_buffer->state.job = NULL;
3497 }
3498 
3499 VKAPI_ATTR void VKAPI_CALL
3500 v3dv_CmdDispatch(VkCommandBuffer commandBuffer,
3501                  uint32_t groupCountX,
3502                  uint32_t groupCountY,
3503                  uint32_t groupCountZ)
3504 {
3505    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3506 
3507    cmd_buffer_emit_pre_dispatch(cmd_buffer);
3508    cmd_buffer_dispatch(cmd_buffer, 0, 0, 0,
3509                        groupCountX, groupCountY, groupCountZ);
3510 }
3511 
3512 VKAPI_ATTR void VKAPI_CALL
3513 v3dv_CmdDispatchBase(VkCommandBuffer commandBuffer,
3514                      uint32_t baseGroupX,
3515                      uint32_t baseGroupY,
3516                      uint32_t baseGroupZ,
3517                      uint32_t groupCountX,
3518                      uint32_t groupCountY,
3519                      uint32_t groupCountZ)
3520 {
3521    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3522 
3523    cmd_buffer_emit_pre_dispatch(cmd_buffer);
3524    cmd_buffer_dispatch(cmd_buffer,
3525                        baseGroupX, baseGroupY, baseGroupZ,
3526                        groupCountX, groupCountY, groupCountZ);
3527 }
3528 
3529 
3530 static void
3531 cmd_buffer_dispatch_indirect(struct v3dv_cmd_buffer *cmd_buffer,
3532                              struct v3dv_buffer *buffer,
3533                              uint32_t offset)
3534 {
3535    /* We can't do indirect dispatches, so instead we record a CPU job that,
3536     * when executed in the queue, will map the indirect buffer, read the
3537     * dispatch parameters, and submit a regular dispatch.
3538     */
3539    struct v3dv_job *job =
3540       v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
3541                                      V3DV_JOB_TYPE_CPU_CSD_INDIRECT,
3542                                      cmd_buffer, -1);
3543    v3dv_return_if_oom(cmd_buffer, NULL);
3544 
3545    /* We need to create a CSD job now, even if we still don't know the actual
3546     * dispatch parameters, because the job setup needs to be done using the
3547     * current command buffer state (i.e. pipeline, descriptor sets, push
3548     * constants, etc.). So we create the job with default dispatch parameters
3549     * and we will rewrite the parts we need at submit time if the indirect
3550     * parameters don't match the ones we used to set up the job.
3551     */
3552    struct v3dv_job *csd_job =
3553       cmd_buffer_create_csd_job(cmd_buffer,
3554                                 0, 0, 0,
3555                                 1, 1, 1,
3556                                 &job->cpu.csd_indirect.wg_uniform_offsets[0],
3557                                 &job->cpu.csd_indirect.wg_size);
3558    v3dv_return_if_oom(cmd_buffer, NULL);
3559    assert(csd_job);
3560 
3561    job->cpu.csd_indirect.buffer = buffer;
3562    job->cpu.csd_indirect.offset = offset;
3563    job->cpu.csd_indirect.csd_job = csd_job;
3564 
3565    /* If the compute shader reads the workgroup sizes we will also need to
3566     * rewrite the corresponding uniforms.
3567     */
3568    job->cpu.csd_indirect.needs_wg_uniform_rewrite =
3569       job->cpu.csd_indirect.wg_uniform_offsets[0] ||
3570       job->cpu.csd_indirect.wg_uniform_offsets[1] ||
3571       job->cpu.csd_indirect.wg_uniform_offsets[2];
3572 
3573    list_addtail(&job->list_link, &cmd_buffer->jobs);
3574    cmd_buffer->state.job = NULL;
3575 }
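
/* Simplified sketch of the queue-time handling described above (not driver
 * code; map_indirect_buffer() is a hypothetical helper standing in for
 * however the queue CPU-maps the buffer, and error handling is omitted).
 * VkDispatchIndirectCommand is three packed uint32_t group counts.
 */
#if 0
static void
example_handle_csd_indirect(struct v3dv_csd_indirect_cpu_job_info *info)
{
   const uint32_t *counts = map_indirect_buffer(info->buffer, info->offset);

   if (counts[0] == 0 || counts[1] == 0 || counts[2] == 0)
      return; /* nothing to dispatch */

   /* Patch the pre-recorded CSD job if the real counts differ from the
    * placeholder 1x1x1 dispatch it was created with.
    */
   if (memcmp(counts, info->csd_job->csd.wg_count, 3 * sizeof(uint32_t)) != 0)
      v3dv_cmd_buffer_rewrite_indirect_csd_job(info, counts);
}
#endif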
3576 
3577 VKAPI_ATTR void VKAPI_CALL
3578 v3dv_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
3579                          VkBuffer _buffer,
3580                          VkDeviceSize offset)
3581 {
3582    V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
3583    V3DV_FROM_HANDLE(v3dv_buffer, buffer, _buffer);
3584 
3585    assert(offset <= UINT32_MAX);
3586 
3587    cmd_buffer_emit_pre_dispatch(cmd_buffer);
3588    cmd_buffer_dispatch_indirect(cmd_buffer, buffer, offset);
3589 }
3590 
3591 VKAPI_ATTR void VKAPI_CALL
3592 v3dv_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
3593 {
3594    /* Nothing to do here since we only support a single device */
3595    assert(deviceMask == 0x1);
3596 }
3597