/*
 * Copyright © 2021 Raspberry Pi Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3dv_private.h"
#include "broadcom/common/v3d_macros.h"
#include "broadcom/cle/v3dx_pack.h"
#include "broadcom/compiler/v3d_compiler.h"

#include "util/half_float.h"
#include "vulkan/util/vk_format.h"
#include "util/u_pack_color.h"

void
v3dX(job_emit_binning_flush)(struct v3dv_job *job)
{
   assert(job);

   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(FLUSH));
   v3dv_return_if_oom(NULL, job);

   cl_emit(&job->bcl, FLUSH, flush);
}

void
v3dX(job_emit_binning_prolog)(struct v3dv_job *job,
                              const struct v3dv_frame_tiling *tiling,
                              uint32_t layers)
{
   /* This must go before the binning mode configuration. It is
    * required for layered framebuffers to work.
    */
   cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
      config.number_of_layers = layers;
   }

   assert(!tiling->double_buffer || !tiling->msaa);
   cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
      config.width_in_pixels = tiling->width;
      config.height_in_pixels = tiling->height;
      config.number_of_render_targets = MAX2(tiling->render_target_count, 1);
      config.multisample_mode_4x = tiling->msaa;
      config.double_buffer_in_non_ms_mode = tiling->double_buffer;
      config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
   }

   /* There's definitely nothing in the VCD cache we want. */
   cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);

   /* "Binning mode lists must have a Start Tile Binning item (6) after
    *  any prefix state data before the binning list proper starts."
    */
   cl_emit(&job->bcl, START_TILE_BINNING, bin);
}

void
v3dX(cmd_buffer_end_render_pass_secondary)(struct v3dv_cmd_buffer *cmd_buffer)
{
   assert(cmd_buffer->state.job);
   v3dv_cl_ensure_space_with_branch(&cmd_buffer->state.job->bcl,
                                    cl_packet_length(RETURN_FROM_SUB_LIST));
   v3dv_return_if_oom(cmd_buffer, NULL);
   cl_emit(&cmd_buffer->state.job->bcl, RETURN_FROM_SUB_LIST, ret);
}

void
v3dX(job_emit_clip_window)(struct v3dv_job *job, const VkRect2D *rect)
{
   assert(job);

   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CLIP_WINDOW));
   v3dv_return_if_oom(NULL, job);

   cl_emit(&job->bcl, CLIP_WINDOW, clip) {
      clip.clip_window_left_pixel_coordinate = rect->offset.x;
      clip.clip_window_bottom_pixel_coordinate = rect->offset.y;
      clip.clip_window_width_in_pixels = rect->extent.width;
      clip.clip_window_height_in_pixels = rect->extent.height;
   }
}

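/* Emits a LOAD_TILE_BUFFER_GENERAL packet to load the contents of the given
 * attachment layer from memory into the selected tile buffer.
 */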
static void
cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
                                 struct v3dv_cl *cl,
                                 struct v3dv_image_view *iview,
                                 uint32_t layer,
                                 uint32_t buffer)
{
   const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
   const struct v3d_resource_slice *slice =
      &image->slices[iview->vk.base_mip_level];
   uint32_t layer_offset =
      v3dv_layer_offset(image, iview->vk.base_mip_level,
                        iview->vk.base_array_layer + layer);

   cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
      load.buffer_to_load = buffer;
      load.address = v3dv_cl_address(image->mem->bo, layer_offset);

      load.input_image_format = iview->format->rt_type;

      /* If we create an image view with only the stencil format, we
       * re-interpret the format as RGBA8_UINT, as it is what we want in
       * general (see CreateImageView).
       *
       * However, when we are loading/storing tiles from the ZSTENCIL tile
       * buffer, we need to use the underlying DS format.
       */
      if (buffer == ZSTENCIL &&
          iview->format->rt_type == V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI) {
         assert(image->format->rt_type == V3D_OUTPUT_IMAGE_FORMAT_D24S8);
         load.input_image_format = image->format->rt_type;
      }

      load.r_b_swap = iview->swap_rb;
      load.channel_reverse = iview->channel_reverse;
      load.memory_format = slice->tiling;

      if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
          slice->tiling == V3D_TILING_UIF_XOR) {
         load.height_in_ub_or_stride =
            slice->padded_height_of_output_image_in_uif_blocks;
      } else if (slice->tiling == V3D_TILING_RASTER) {
         load.height_in_ub_or_stride = slice->stride;
      }

      if (image->vk.samples > VK_SAMPLE_COUNT_1_BIT)
         load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
      else
         load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
   }
}

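/* Returns true if the attachment (or view) contents need to be loaded from
 * memory into the tile buffer, based on the aspect being tested, the first
 * subpass that uses the attachment and its load operation.
 */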
static bool
check_needs_load(const struct v3dv_cmd_buffer_state *state,
                 VkImageAspectFlags aspect,
                 uint32_t first_subpass_idx,
                 VkAttachmentLoadOp load_op)
{
   /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
    * testing does not exist in the image.
    */
   if (!aspect)
      return false;

   /* Attachment (or view) load operations apply on the first subpass that
    * uses the attachment (or view), otherwise we always need to load.
    */
   if (state->job->first_subpass > first_subpass_idx)
      return true;

   /* If the job is continuing a subpass started in another job, we always
    * need to load.
    */
   if (state->job->is_subpass_continue)
      return true;

   /* If the area is not aligned to tile boundaries, we always need to load */
   if (!state->tile_aligned_render_area)
      return true;

   /* The attachment load operation must be LOAD */
   return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
}

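/* Returns the Z/Stencil tile buffer to use based on which depth/stencil
 * aspects are involved.
 */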
static inline uint32_t
v3dv_zs_buffer(bool depth, bool stencil)
{
   if (depth && stencil)
      return ZSTENCIL;
   else if (depth)
      return Z;
   else if (stencil)
      return STENCIL;
   return NONE;
}

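/* Emits the tile buffer loads for all color and depth/stencil attachments
 * used by the current subpass (and layer), followed by the END_OF_LOADS
 * marker.
 */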
static void
cmd_buffer_render_pass_emit_loads(struct v3dv_cmd_buffer *cmd_buffer,
                                  struct v3dv_cl *cl,
                                  uint32_t layer)
{
   const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
   const struct v3dv_render_pass *pass = state->pass;
   const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];

   assert(!pass->multiview_enabled || layer < MAX_MULTIVIEW_VIEW_COUNT);

   for (uint32_t i = 0; i < subpass->color_count; i++) {
      uint32_t attachment_idx = subpass->color_attachments[i].attachment;

      if (attachment_idx == VK_ATTACHMENT_UNUSED)
         continue;

      const struct v3dv_render_pass_attachment *attachment =
         &state->pass->attachments[attachment_idx];

      /* According to the Vulkan spec:
       *
       *    "The load operation for each sample in an attachment happens before
       *     any recorded command which accesses the sample in the first subpass
       *     where the attachment is used."
       *
       * If the load operation is CLEAR, we must only clear once on the first
       * subpass that uses the attachment (and in that case we don't LOAD).
       * After that, we always want to load so we don't lose any rendering done
       * by a previous subpass to the same attachment. We also want to load
       * if the current job is continuing subpass work started by a previous
       * job, for the same reason.
       *
       * If the render area is not aligned to tile boundaries then we have
       * tiles which are partially covered by it. In this case, we need to
       * load the tiles so we can preserve the pixels that are outside the
       * render area for any such tiles.
       */
      uint32_t first_subpass = !pass->multiview_enabled ?
         attachment->first_subpass :
         attachment->views[layer].first_subpass;

      bool needs_load = check_needs_load(state,
                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                         first_subpass,
                                         attachment->desc.loadOp);
      if (needs_load) {
         struct v3dv_image_view *iview =
            state->attachments[attachment_idx].image_view;
         cmd_buffer_render_pass_emit_load(cmd_buffer, cl, iview,
                                          layer, RENDER_TARGET_0 + i);
      }
   }

   uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
   if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
      const struct v3dv_render_pass_attachment *ds_attachment =
         &state->pass->attachments[ds_attachment_idx];

      const VkImageAspectFlags ds_aspects =
         vk_format_aspects(ds_attachment->desc.format);

      uint32_t ds_first_subpass = !pass->multiview_enabled ?
         ds_attachment->first_subpass :
         ds_attachment->views[layer].first_subpass;

      const bool needs_depth_load =
         check_needs_load(state,
                          ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                          ds_first_subpass,
                          ds_attachment->desc.loadOp);

      const bool needs_stencil_load =
         check_needs_load(state,
                          ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                          ds_first_subpass,
                          ds_attachment->desc.stencilLoadOp);

      if (needs_depth_load || needs_stencil_load) {
         struct v3dv_image_view *iview =
            state->attachments[ds_attachment_idx].image_view;
         /* From the Vulkan spec:
          *
          *   "When an image view of a depth/stencil image is used as a
          *   depth/stencil framebuffer attachment, the aspectMask is ignored
          *   and both depth and stencil image subresources are used."
          *
          * So we ignore the aspects from the subresource range of the image
          * view for the depth/stencil attachment, but we still need to restrict
          * to the aspects compatible with the render pass and the image.
          */
         const uint32_t zs_buffer =
            v3dv_zs_buffer(needs_depth_load, needs_stencil_load);
         cmd_buffer_render_pass_emit_load(cmd_buffer, cl,
                                          iview, layer, zs_buffer);
      }
   }

   cl_emit(cl, END_OF_LOADS, end);
}

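/* Emits a STORE_TILE_BUFFER_GENERAL packet for the given attachment layer,
 * optionally clearing the tile buffer after the store, and selecting the
 * decimate mode needed for multisampled attachments and resolves.
 */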
static void
cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
                                  struct v3dv_cl *cl,
                                  uint32_t attachment_idx,
                                  uint32_t layer,
                                  uint32_t buffer,
                                  bool clear,
                                  bool is_multisample_resolve)
{
   const struct v3dv_image_view *iview =
      cmd_buffer->state.attachments[attachment_idx].image_view;
   const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
   const struct v3d_resource_slice *slice =
      &image->slices[iview->vk.base_mip_level];
   uint32_t layer_offset = v3dv_layer_offset(image,
                                             iview->vk.base_mip_level,
                                             iview->vk.base_array_layer + layer);

   cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
      store.buffer_to_store = buffer;
      store.address = v3dv_cl_address(image->mem->bo, layer_offset);
      store.clear_buffer_being_stored = clear;

      store.output_image_format = iview->format->rt_type;

      /* If we create an image view with only the stencil format, we
       * re-interpret the format as RGBA8_UINT, as it is what we want in
       * general (see CreateImageView).
       *
       * However, when we are loading/storing tiles from the ZSTENCIL tile
       * buffer, we need to use the underlying DS format.
       */
      if (buffer == ZSTENCIL &&
          iview->format->rt_type == V3D_OUTPUT_IMAGE_FORMAT_RGBA8UI) {
         assert(image->format->rt_type == V3D_OUTPUT_IMAGE_FORMAT_D24S8);
         store.output_image_format = image->format->rt_type;
      }

      store.r_b_swap = iview->swap_rb;
      store.channel_reverse = iview->channel_reverse;
      store.memory_format = slice->tiling;

      if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
          slice->tiling == V3D_TILING_UIF_XOR) {
         store.height_in_ub_or_stride =
            slice->padded_height_of_output_image_in_uif_blocks;
      } else if (slice->tiling == V3D_TILING_RASTER) {
         store.height_in_ub_or_stride = slice->stride;
      }

      if (image->vk.samples > VK_SAMPLE_COUNT_1_BIT)
         store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
      else if (is_multisample_resolve)
         store.decimate_mode = V3D_DECIMATE_MODE_4X;
      else
         store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
   }
}

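/* Returns true if the attachment (or view) aspect should be cleared through
 * the TLB in this job. This requires a CLEAR load operation, a tile-aligned
 * render area, and that the job starts the first subpass that uses the
 * attachment.
 */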
static bool
check_needs_clear(const struct v3dv_cmd_buffer_state *state,
                  VkImageAspectFlags aspect,
                  uint32_t first_subpass_idx,
                  VkAttachmentLoadOp load_op,
                  bool do_clear_with_draw)
{
   /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
    * testing does not exist in the image.
    */
   if (!aspect)
      return false;

   /* If the aspect needs to be cleared with a draw call then we won't emit
    * the clear here.
    */
   if (do_clear_with_draw)
      return false;

   /* If this is resuming a subpass started with another job, then attachment
    * load operations don't apply.
    */
   if (state->job->is_subpass_continue)
      return false;

   /* If the render area is not aligned to tile boundaries we can't use the
    * TLB for a clear.
    */
   if (!state->tile_aligned_render_area)
      return false;

   /* If this job is running in a subpass other than the first subpass in
    * which this attachment (or view) is used then attachment load operations
    * don't apply.
    */
   if (state->job->first_subpass != first_subpass_idx)
      return false;

   /* The attachment load operation must be CLEAR */
   return load_op == VK_ATTACHMENT_LOAD_OP_CLEAR;
}

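/* Returns true if the attachment (or view) contents must be stored back to
 * memory, either because the attachment is still used by a later subpass
 * (or by another job in the same subpass) or because its store operation
 * is STORE.
 */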
static bool
check_needs_store(const struct v3dv_cmd_buffer_state *state,
                  VkImageAspectFlags aspect,
                  uint32_t last_subpass_idx,
                  VkAttachmentStoreOp store_op)
{
   /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
    * testing does not exist in the image.
    */
   if (!aspect)
      return false;

   /* Attachment (or view) store operations only apply on the last subpass
    * where the attachment (or view) is used, in other subpasses we always
    * need to store.
    */
   if (state->subpass_idx < last_subpass_idx)
      return true;

   /* Attachment store operations only apply on the last job we emit on the
    * last subpass where the attachment is used, otherwise we always need to
    * store.
    */
   if (!state->job->is_subpass_finish)
      return true;

   /* The attachment store operation must be STORE */
   return store_op == VK_ATTACHMENT_STORE_OP_STORE;
}

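/* Emits the tile buffer stores (including TLB resolves and tile buffer
 * clears) for all color and depth/stencil attachments used by the current
 * subpass and layer.
 */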
static void
cmd_buffer_render_pass_emit_stores(struct v3dv_cmd_buffer *cmd_buffer,
                                   struct v3dv_cl *cl,
                                   uint32_t layer)
{
   struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
   struct v3dv_render_pass *pass = state->pass;
   const struct v3dv_subpass *subpass =
      &pass->subpasses[state->subpass_idx];

   bool has_stores = false;
   bool use_global_zs_clear = false;
   bool use_global_rt_clear = false;

   assert(!pass->multiview_enabled || layer < MAX_MULTIVIEW_VIEW_COUNT);

   /* FIXME: separate stencil */
   uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
   if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
      const struct v3dv_render_pass_attachment *ds_attachment =
         &state->pass->attachments[ds_attachment_idx];

      assert(state->job->first_subpass >= ds_attachment->first_subpass);
      assert(state->subpass_idx >= ds_attachment->first_subpass);
      assert(state->subpass_idx <= ds_attachment->last_subpass);

      /* From the Vulkan spec, VkImageSubresourceRange:
       *
       *   "When an image view of a depth/stencil image is used as a
       *   depth/stencil framebuffer attachment, the aspectMask is ignored
       *   and both depth and stencil image subresources are used."
       *
       * So we ignore the aspects from the subresource range of the image
       * view for the depth/stencil attachment, but we still need to restrict
       * to the aspects compatible with the render pass and the image.
       */
      const VkImageAspectFlags aspects =
         vk_format_aspects(ds_attachment->desc.format);

      /* Only clear once on the first subpass that uses the attachment */
      uint32_t ds_first_subpass = !state->pass->multiview_enabled ?
         ds_attachment->first_subpass :
         ds_attachment->views[layer].first_subpass;

      bool needs_depth_clear =
         check_needs_clear(state,
                           aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                           ds_first_subpass,
                           ds_attachment->desc.loadOp,
                           subpass->do_depth_clear_with_draw);

      bool needs_stencil_clear =
         check_needs_clear(state,
                           aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                           ds_first_subpass,
                           ds_attachment->desc.stencilLoadOp,
                           subpass->do_stencil_clear_with_draw);

      /* Skip the last store if it is not required */
      uint32_t ds_last_subpass = !pass->multiview_enabled ?
         ds_attachment->last_subpass :
         ds_attachment->views[layer].last_subpass;

      bool needs_depth_store =
         check_needs_store(state,
                           aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                           ds_last_subpass,
                           ds_attachment->desc.storeOp);

      bool needs_stencil_store =
         check_needs_store(state,
                           aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                           ds_last_subpass,
                           ds_attachment->desc.stencilStoreOp);

      /* If we have a resolve, handle it before storing the tile */
      const struct v3dv_cmd_buffer_attachment_state *ds_att_state =
         &state->attachments[ds_attachment_idx];
      if (ds_att_state->use_tlb_resolve) {
         assert(ds_att_state->has_resolve);
         assert(subpass->resolve_depth || subpass->resolve_stencil);
         const uint32_t resolve_attachment_idx =
            subpass->ds_resolve_attachment.attachment;
         assert(resolve_attachment_idx != VK_ATTACHMENT_UNUSED);

         const uint32_t zs_buffer =
            v3dv_zs_buffer(subpass->resolve_depth, subpass->resolve_stencil);
         cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
                                           resolve_attachment_idx, layer,
                                           zs_buffer,
                                           false, false);
         has_stores = true;
      } else if (ds_att_state->has_resolve) {
         /* If we can't use the TLB to implement the resolve we will need to
          * store the attachment so we can implement it later using a blit.
          */
         needs_depth_store = subpass->resolve_depth;
         needs_stencil_store = subpass->resolve_stencil;
      }

      /* GFXH-1689: The per-buffer store command's clear buffer bit is broken
       * for depth/stencil.
       *
       * There used to be some confusion regarding the Clear Tile Buffers
       * Z/S bit also being broken, but we confirmed with Broadcom that this
       * is not the case, it was just that some other hardware bugs (that we
       * need to work around, such as GFXH-1461) could cause this bit to behave
       * incorrectly.
       *
       * There used to be another issue where the RTs bit in the Clear Tile
       * Buffers packet also cleared Z/S, but Broadcom confirmed this is
       * fixed since V3D 4.1.
       *
       * So if we have to emit a clear of depth or stencil we don't use
       * the per-buffer store clear bit, even if we need to store the buffers,
       * instead we always have to use the Clear Tile Buffers Z/S bit.
       * If we have configured the job to do early Z/S clearing, then we
       * don't want to emit any Clear Tile Buffers command at all here.
       *
       * Note that GFXH-1689 is not reproduced in the simulator, where
       * using the clear buffer bit in depth/stencil stores works fine.
       */
      use_global_zs_clear = !state->job->early_zs_clear &&
         (needs_depth_clear || needs_stencil_clear);
      if (needs_depth_store || needs_stencil_store) {
         const uint32_t zs_buffer =
            v3dv_zs_buffer(needs_depth_store, needs_stencil_store);
         cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
                                           ds_attachment_idx, layer,
                                           zs_buffer, false, false);
         has_stores = true;
      }
   }

   for (uint32_t i = 0; i < subpass->color_count; i++) {
      uint32_t attachment_idx = subpass->color_attachments[i].attachment;

      if (attachment_idx == VK_ATTACHMENT_UNUSED)
         continue;

      const struct v3dv_render_pass_attachment *attachment =
         &state->pass->attachments[attachment_idx];

      assert(state->job->first_subpass >= attachment->first_subpass);
      assert(state->subpass_idx >= attachment->first_subpass);
      assert(state->subpass_idx <= attachment->last_subpass);

      /* Only clear once on the first subpass that uses the attachment */
      uint32_t first_subpass = !pass->multiview_enabled ?
         attachment->first_subpass :
         attachment->views[layer].first_subpass;

      bool needs_clear =
         check_needs_clear(state,
                           VK_IMAGE_ASPECT_COLOR_BIT,
                           first_subpass,
                           attachment->desc.loadOp,
                           false);

      /* Skip the last store if it is not required */
      uint32_t last_subpass = !pass->multiview_enabled ?
         attachment->last_subpass :
         attachment->views[layer].last_subpass;

      bool needs_store =
         check_needs_store(state,
                           VK_IMAGE_ASPECT_COLOR_BIT,
                           last_subpass,
                           attachment->desc.storeOp);

      /* If we need to resolve this attachment emit that store first. Notice
       * that we must not request a tile buffer clear here in that case, since
       * that would clear the tile buffer before we get to emit the actual
       * color attachment store below, since the clear happens after the
       * store is completed.
       *
       * If the attachment doesn't support TLB resolves (or the render area
       * is not aligned to tile boundaries) then we will have to fallback to
       * doing the resolve in a shader separately after this job, so we will
       * need to store the multisampled attachment even if that wasn't
       * requested by the client.
       */
      const struct v3dv_cmd_buffer_attachment_state *att_state =
         &state->attachments[attachment_idx];
      if (att_state->use_tlb_resolve) {
         assert(att_state->has_resolve);
         const uint32_t resolve_attachment_idx =
            subpass->resolve_attachments[i].attachment;
         cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
                                           resolve_attachment_idx, layer,
                                           RENDER_TARGET_0 + i,
                                           false, true);
         has_stores = true;
      } else if (att_state->has_resolve) {
         needs_store = true;
      }

      /* Emit the color attachment store if needed */
      if (needs_store) {
         cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
                                           attachment_idx, layer,
                                           RENDER_TARGET_0 + i,
                                           needs_clear && !use_global_rt_clear,
                                           false);
         has_stores = true;
      } else if (needs_clear) {
         use_global_rt_clear = true;
      }
   }

   /* We always need to emit at least one dummy store */
   if (!has_stores) {
      cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
         store.buffer_to_store = NONE;
      }
   }

   /* If we have any depth/stencil clears we can't use the per-buffer clear
    * bit and instead we have to emit a single clear of all tile buffers.
    */
   if (use_global_zs_clear || use_global_rt_clear) {
      cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
         clear.clear_z_stencil_buffer = use_global_zs_clear;
         clear.clear_all_render_targets = use_global_rt_clear;
      }
   }
}

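/* Emits the generic tile list that is executed for every tile in the frame
 * (tile buffer loads, a branch to the binned tile list, and the stores) and
 * links it from the RCL.
 */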
static void
cmd_buffer_render_pass_emit_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
                                         uint32_t layer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   /* Emit the generic list in our indirect state -- the rcl will just
    * have pointers into it.
    */
   struct v3dv_cl *cl = &job->indirect;
   v3dv_cl_ensure_space(cl, 200, 1);
   v3dv_return_if_oom(cmd_buffer, NULL);

   struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);

   cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);

   cmd_buffer_render_pass_emit_loads(cmd_buffer, cl, layer);

   /* The binner starts out writing tiles assuming that the initial mode
    * is triangles, so make sure that's the case.
    */
   cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
      fmt.primitive_type = LIST_TRIANGLES;
   }

   /* PTB assumes that value is 0, but hw will not set it. */
   cl_emit(cl, SET_INSTANCEID, set) {
      set.instance_id = 0;
   }

   cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);

   cmd_buffer_render_pass_emit_stores(cmd_buffer, cl, layer);

   cl_emit(cl, END_OF_TILE_MARKER, end);

   cl_emit(cl, RETURN_FROM_SUB_LIST, ret);

   cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
      branch.start = tile_list_start;
      branch.end = v3dv_cl_get_address(cl);
   }
}

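/* Emits the per-layer portion of the RCL: the tile list base address for the
 * layer, the generic tile list, and the supertile coordinates that cover the
 * render area.
 */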
static void
cmd_buffer_emit_render_pass_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
                                      uint32_t layer)
{
   const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;

   struct v3dv_job *job = cmd_buffer->state.job;
   struct v3dv_cl *rcl = &job->rcl;

   /* If doing multicore binning, we would need to initialize each
    * core's tile list here.
    */
   const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
   const uint32_t tile_alloc_offset =
      64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
   cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
      list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
   }

   cmd_buffer_render_pass_emit_per_tile_rcl(cmd_buffer, layer);

   uint32_t supertile_w_in_pixels =
      tiling->tile_width * tiling->supertile_width;
   uint32_t supertile_h_in_pixels =
      tiling->tile_height * tiling->supertile_height;
   const uint32_t min_x_supertile =
      state->render_area.offset.x / supertile_w_in_pixels;
   const uint32_t min_y_supertile =
      state->render_area.offset.y / supertile_h_in_pixels;

   uint32_t max_render_x = state->render_area.offset.x;
   if (state->render_area.extent.width > 0)
      max_render_x += state->render_area.extent.width - 1;
   uint32_t max_render_y = state->render_area.offset.y;
   if (state->render_area.extent.height > 0)
      max_render_y += state->render_area.extent.height - 1;
   const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
   const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;

   for (int y = min_y_supertile; y <= max_y_supertile; y++) {
      for (int x = min_x_supertile; x <= max_x_supertile; x++) {
         cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
            coords.column_number_in_supertiles = x;
            coords.row_number_in_supertiles = y;
         }
      }
   }
}

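/* Configures the early Z fields of the rendering mode config based on the
 * early Z state decided while recording the draw calls in the job.
 */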
static void
set_rcl_early_z_config(struct v3dv_job *job,
                       bool *early_z_disable,
                       uint32_t *early_z_test_and_update_direction)
{
   /* If this is true then we have not emitted any draw calls in this job
    * and we don't get any benefits from early Z.
    */
   if (!job->decided_global_ez_enable) {
      assert(job->draw_count == 0);
      *early_z_disable = true;
      return;
   }

   switch (job->first_ez_state) {
   case V3D_EZ_UNDECIDED:
   case V3D_EZ_LT_LE:
      *early_z_disable = false;
      *early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
      break;
   case V3D_EZ_GT_GE:
      *early_z_disable = false;
      *early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
      break;
   case V3D_EZ_DISABLED:
      *early_z_disable = true;
      break;
   }
}

void
v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
   const struct v3dv_framebuffer *framebuffer = state->framebuffer;

   /* We can't emit the RCL until we have a framebuffer, which we may not have
    * if we are recording a secondary command buffer. In that case, we will
    * have to wait until vkCmdExecuteCommands is called from a primary command
    * buffer.
    */
   if (!framebuffer) {
      assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      return;
   }

   const struct v3dv_frame_tiling *tiling = &job->frame_tiling;

   const uint32_t fb_layers = job->frame_tiling.layers;

   v3dv_cl_ensure_space_with_branch(&job->rcl, 200 +
                                    MAX2(fb_layers, 1) * 256 *
                                    cl_packet_length(SUPERTILE_COORDINATES));
   v3dv_return_if_oom(cmd_buffer, NULL);

   assert(state->subpass_idx < state->pass->subpass_count);
   const struct v3dv_render_pass *pass = state->pass;
   const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
   struct v3dv_cl *rcl = &job->rcl;

   /* Common config must be the first TILE_RENDERING_MODE_CFG and
    * Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
    * updates to the previous HW state.
    */
   bool do_early_zs_clear = false;
   const uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
   assert(!tiling->msaa || !tiling->double_buffer);
   cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
      config.image_width_pixels = framebuffer->width;
      config.image_height_pixels = framebuffer->height;
      config.number_of_render_targets = MAX2(subpass->color_count, 1);
      config.multisample_mode_4x = tiling->msaa;
      config.double_buffer_in_non_ms_mode = tiling->double_buffer;
      config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;

      if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
         const struct v3dv_image_view *iview =
            state->attachments[ds_attachment_idx].image_view;
         config.internal_depth_type = iview->internal_type;

         set_rcl_early_z_config(job,
                                &config.early_z_disable,
                                &config.early_z_test_and_update_direction);

         /* Early-Z/S clear can be enabled if the job is clearing and not
          * storing (or loading) depth. If a stencil aspect is also present
          * we have the same requirements for it, however, in this case we
          * can accept stencil loadOp DONT_CARE as well, so instead of
          * checking that stencil is cleared we check that it is not loaded.
          *
          * Early-Z/S clearing is independent of Early Z/S testing, so it is
          * possible to enable one but not the other so long as their
          * respective requirements are met.
          */
         struct v3dv_render_pass_attachment *ds_attachment =
            &pass->attachments[ds_attachment_idx];

         const VkImageAspectFlags ds_aspects =
            vk_format_aspects(ds_attachment->desc.format);

         bool needs_depth_clear =
            check_needs_clear(state,
                              ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                              ds_attachment->first_subpass,
                              ds_attachment->desc.loadOp,
                              subpass->do_depth_clear_with_draw);

         bool needs_depth_store =
            check_needs_store(state,
                              ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                              ds_attachment->last_subpass,
                              ds_attachment->desc.storeOp) ||
                              subpass->resolve_depth;

         do_early_zs_clear = needs_depth_clear && !needs_depth_store;
         if (do_early_zs_clear &&
             vk_format_has_stencil(ds_attachment->desc.format)) {
            bool needs_stencil_load =
               check_needs_load(state,
                                ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                                ds_attachment->first_subpass,
                                ds_attachment->desc.stencilLoadOp);

            bool needs_stencil_store =
               check_needs_store(state,
                                 ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                                 ds_attachment->last_subpass,
                                 ds_attachment->desc.stencilStoreOp) ||
                                 subpass->resolve_stencil;

            do_early_zs_clear = !needs_stencil_load && !needs_stencil_store;
         }

         config.early_depth_stencil_clear = do_early_zs_clear;
      } else {
         config.early_z_disable = true;
      }
   }

   /* If we enabled early Z/S clear, then we can't emit any "Clear Tile Buffers"
    * commands with the Z/S bit set, so keep track of whether we enabled this
    * in the job so we can skip these later.
    */
   job->early_zs_clear = do_early_zs_clear;

   for (uint32_t i = 0; i < subpass->color_count; i++) {
      uint32_t attachment_idx = subpass->color_attachments[i].attachment;
      if (attachment_idx == VK_ATTACHMENT_UNUSED)
         continue;

      struct v3dv_image_view *iview =
         state->attachments[attachment_idx].image_view;

      const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
      const struct v3d_resource_slice *slice =
         &image->slices[iview->vk.base_mip_level];

      const uint32_t *clear_color =
         &state->attachments[attachment_idx].clear_value.color[0];

      uint32_t clear_pad = 0;
      if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
          slice->tiling == V3D_TILING_UIF_XOR) {
         int uif_block_height = v3d_utile_height(image->cpp) * 2;

         uint32_t implicit_padded_height =
            align(framebuffer->height, uif_block_height) / uif_block_height;

         if (slice->padded_height_of_output_image_in_uif_blocks -
             implicit_padded_height >= 15) {
            clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
         }
      }

      cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
         clear.clear_color_low_32_bits = clear_color[0];
         clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
         clear.render_target_number = i;
      };

      if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
         cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
            clear.clear_color_mid_low_32_bits =
               ((clear_color[1] >> 24) | (clear_color[2] << 8));
            clear.clear_color_mid_high_24_bits =
               ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
            clear.render_target_number = i;
         };
      }

      if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
         cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
            clear.uif_padded_height_in_uif_blocks = clear_pad;
            clear.clear_color_high_16_bits = clear_color[3] >> 16;
            clear.render_target_number = i;
         };
      }
   }

   cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
      v3dX(cmd_buffer_render_pass_setup_render_target)
         (cmd_buffer, 0, &rt.render_target_0_internal_bpp,
          &rt.render_target_0_internal_type, &rt.render_target_0_clamp);
      v3dX(cmd_buffer_render_pass_setup_render_target)
         (cmd_buffer, 1, &rt.render_target_1_internal_bpp,
          &rt.render_target_1_internal_type, &rt.render_target_1_clamp);
      v3dX(cmd_buffer_render_pass_setup_render_target)
         (cmd_buffer, 2, &rt.render_target_2_internal_bpp,
          &rt.render_target_2_internal_type, &rt.render_target_2_clamp);
      v3dX(cmd_buffer_render_pass_setup_render_target)
         (cmd_buffer, 3, &rt.render_target_3_internal_bpp,
          &rt.render_target_3_internal_type, &rt.render_target_3_clamp);
   }

   /* Ends rendering mode config. */
   if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
      cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
         clear.z_clear_value =
            state->attachments[ds_attachment_idx].clear_value.z;
         clear.stencil_clear_value =
            state->attachments[ds_attachment_idx].clear_value.s;
      };
   } else {
      cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
         clear.z_clear_value = 1.0f;
         clear.stencil_clear_value = 0;
      };
   }

   /* Always set initial block size before the first branch, which needs
    * to match the value from binning mode config.
    */
   cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
      init.use_auto_chained_tile_lists = true;
      init.size_of_first_block_in_chained_tile_lists =
         TILE_ALLOCATION_BLOCK_SIZE_64B;
   }

   cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
      config.number_of_bin_tile_lists = 1;
      config.total_frame_width_in_tiles = tiling->draw_tiles_x;
      config.total_frame_height_in_tiles = tiling->draw_tiles_y;

      config.supertile_width_in_tiles = tiling->supertile_width;
      config.supertile_height_in_tiles = tiling->supertile_height;

      config.total_frame_width_in_supertiles =
         tiling->frame_width_in_supertiles;
      config.total_frame_height_in_supertiles =
         tiling->frame_height_in_supertiles;
   }

   /* Emit an initial clear of the tile buffers. This is necessary
    * for any buffers that should be cleared (since clearing
    * normally happens at the *end* of the generic tile list), but
    * it's also nice to clear everything so the first tile doesn't
    * inherit any contents from some previous frame.
    *
    * Also, implement the GFXH-1742 workaround. There's a race in
    * the HW between the RCL updating the TLB's internal type/size
    * and the spawning of the QPU instances using the TLB's current
    * internal type/size. To make sure the QPUs get the right
    * state, we need 1 dummy store in between internal type/size
    * changes on V3D 3.x, and 2 dummy stores on 4.x.
    */
   for (int i = 0; i < 2; i++) {
      cl_emit(rcl, TILE_COORDINATES, coords);
      cl_emit(rcl, END_OF_LOADS, end);
      cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
         store.buffer_to_store = NONE;
      }
      if (cmd_buffer->state.tile_aligned_render_area &&
          (i == 0 || v3dv_do_double_initial_tile_clear(tiling))) {
         cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
            clear.clear_z_stencil_buffer = !job->early_zs_clear;
            clear.clear_all_render_targets = true;
         }
      }
      cl_emit(rcl, END_OF_TILE_MARKER, end);
   }

   cl_emit(rcl, FLUSH_VCD_CACHE, flush);

   for (int layer = 0; layer < MAX2(1, fb_layers); layer++) {
      if (subpass->view_mask == 0 || (subpass->view_mask & (1u << layer)))
         cmd_buffer_emit_render_pass_layer_rcl(cmd_buffer, layer);
   }

   cl_emit(rcl, END_OF_RENDERING, end);
}

void
v3dX(cmd_buffer_emit_viewport)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
   /* FIXME: right now we only support one viewport. viewports[0] would work
    * now, but this would need to change if we allow multiple viewports.
    */
   float *vptranslate = dynamic->viewport.translate[0];
   float *vpscale = dynamic->viewport.scale[0];

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   const uint32_t required_cl_size =
      cl_packet_length(CLIPPER_XY_SCALING) +
      cl_packet_length(CLIPPER_Z_SCALE_AND_OFFSET) +
      cl_packet_length(CLIPPER_Z_MIN_MAX_CLIPPING_PLANES) +
      cl_packet_length(VIEWPORT_OFFSET);
   v3dv_cl_ensure_space_with_branch(&job->bcl, required_cl_size);
   v3dv_return_if_oom(cmd_buffer, NULL);

   cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
      clip.viewport_half_width_in_1_256th_of_pixel = vpscale[0] * 256.0f;
      clip.viewport_half_height_in_1_256th_of_pixel = vpscale[1] * 256.0f;
   }

   cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
      clip.viewport_z_offset_zc_to_zs = vptranslate[2];
      clip.viewport_z_scale_zc_to_zs = vpscale[2];
   }
   cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
      /* Vulkan's Z NDC is [0..1], unlike OpenGL which is [-1, 1] */
      float z1 = vptranslate[2];
      float z2 = vptranslate[2] + vpscale[2];
      clip.minimum_zw = MIN2(z1, z2);
      clip.maximum_zw = MAX2(z1, z2);
   }

   cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
      vp.viewport_centre_x_coordinate = vptranslate[0];
      vp.viewport_centre_y_coordinate = vptranslate[1];
   }

   cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEWPORT;
}

void
v3dX(cmd_buffer_emit_stencil)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   struct v3dv_dynamic_state *dynamic_state = &cmd_buffer->state.dynamic;

   const uint32_t dynamic_stencil_states = V3DV_DYNAMIC_STENCIL_COMPARE_MASK |
      V3DV_DYNAMIC_STENCIL_WRITE_MASK |
      V3DV_DYNAMIC_STENCIL_REFERENCE;

   v3dv_cl_ensure_space_with_branch(&job->bcl,
                                    2 * cl_packet_length(STENCIL_CFG));
   v3dv_return_if_oom(cmd_buffer, NULL);

   bool emitted_stencil = false;
   for (uint32_t i = 0; i < 2; i++) {
      if (pipeline->emit_stencil_cfg[i]) {
         if (dynamic_state->mask & dynamic_stencil_states) {
            cl_emit_with_prepacked(&job->bcl, STENCIL_CFG,
                                   pipeline->stencil_cfg[i], config) {
               if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK) {
                  config.stencil_test_mask =
                     i == 0 ? dynamic_state->stencil_compare_mask.front :
                     dynamic_state->stencil_compare_mask.back;
               }
               if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK) {
                  config.stencil_write_mask =
                     i == 0 ? dynamic_state->stencil_write_mask.front :
                     dynamic_state->stencil_write_mask.back;
               }
               if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_REFERENCE) {
                  config.stencil_ref_value =
                     i == 0 ? dynamic_state->stencil_reference.front :
                     dynamic_state->stencil_reference.back;
               }
            }
         } else {
            cl_emit_prepacked(&job->bcl, &pipeline->stencil_cfg[i]);
         }

         emitted_stencil = true;
      }
   }

   if (emitted_stencil) {
      const uint32_t dynamic_stencil_dirty_flags =
         V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
         V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
         V3DV_CMD_DIRTY_STENCIL_REFERENCE;
      cmd_buffer->state.dirty &= ~dynamic_stencil_dirty_flags;
   }
}

void
v3dX(cmd_buffer_emit_depth_bias)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   assert(pipeline);

   if (!pipeline->depth_bias.enabled)
      return;

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(DEPTH_OFFSET));
   v3dv_return_if_oom(cmd_buffer, NULL);

   struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
   cl_emit(&job->bcl, DEPTH_OFFSET, bias) {
      bias.depth_offset_factor = dynamic->depth_bias.slope_factor;
      bias.depth_offset_units = dynamic->depth_bias.constant_factor;
      if (pipeline->depth_bias.is_z16)
         bias.depth_offset_units *= 256.0f;
      bias.limit = dynamic->depth_bias.depth_bias_clamp;
   }

   cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_DEPTH_BIAS;
}

void
v3dX(cmd_buffer_emit_line_width)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(LINE_WIDTH));
   v3dv_return_if_oom(cmd_buffer, NULL);

   cl_emit(&job->bcl, LINE_WIDTH, line) {
      line.line_width = cmd_buffer->state.dynamic.line_width;
   }

   cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_LINE_WIDTH;
}

void
v3dX(cmd_buffer_emit_sample_state)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   assert(pipeline);

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(SAMPLE_STATE));
   v3dv_return_if_oom(cmd_buffer, NULL);

   cl_emit(&job->bcl, SAMPLE_STATE, state) {
      state.coverage = 1.0f;
      state.mask = pipeline->sample_mask;
   }
}

void
v3dX(cmd_buffer_emit_blend)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   assert(pipeline);

   const uint32_t blend_packets_size =
      cl_packet_length(BLEND_ENABLES) +
      cl_packet_length(BLEND_CONSTANT_COLOR) +
      cl_packet_length(BLEND_CFG) * V3D_MAX_DRAW_BUFFERS;

   v3dv_cl_ensure_space_with_branch(&job->bcl, blend_packets_size);
   v3dv_return_if_oom(cmd_buffer, NULL);

   if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
      if (pipeline->blend.enables) {
         cl_emit(&job->bcl, BLEND_ENABLES, enables) {
            enables.mask = pipeline->blend.enables;
         }
      }

      for (uint32_t i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
         if (pipeline->blend.enables & (1 << i))
            cl_emit_prepacked(&job->bcl, &pipeline->blend.cfg[i]);
      }
   }

   if (pipeline->blend.needs_color_constants &&
       cmd_buffer->state.dirty & V3DV_CMD_DIRTY_BLEND_CONSTANTS) {
      struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
      cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
         color.red_f16 = _mesa_float_to_half(dynamic->blend_constants[0]);
         color.green_f16 = _mesa_float_to_half(dynamic->blend_constants[1]);
         color.blue_f16 = _mesa_float_to_half(dynamic->blend_constants[2]);
         color.alpha_f16 = _mesa_float_to_half(dynamic->blend_constants[3]);
      }
      cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_BLEND_CONSTANTS;
   }
}

void
v3dX(cmd_buffer_emit_color_write_mask)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(COLOR_WRITE_MASKS));

   struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
   cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
      mask.mask = (~dynamic->color_write_enable |
                   pipeline->blend.color_write_masks) & 0xffff;
   }

   cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE;
}

1268 static void
emit_flat_shade_flags(struct v3dv_job * job,int varying_offset,uint32_t varyings,enum V3DX (Varying_Flags_Action)lower,enum V3DX (Varying_Flags_Action)higher)1269 emit_flat_shade_flags(struct v3dv_job *job,
1270                       int varying_offset,
1271                       uint32_t varyings,
1272                       enum V3DX(Varying_Flags_Action) lower,
1273                       enum V3DX(Varying_Flags_Action) higher)
1274 {
1275    v3dv_cl_ensure_space_with_branch(&job->bcl,
1276                                     cl_packet_length(FLAT_SHADE_FLAGS));
1277    v3dv_return_if_oom(NULL, job);
1278 
1279    cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
1280       flags.varying_offset_v0 = varying_offset;
1281       flags.flat_shade_flags_for_varyings_v024 = varyings;
1282       flags.action_for_flat_shade_flags_of_lower_numbered_varyings = lower;
1283       flags.action_for_flat_shade_flags_of_higher_numbered_varyings = higher;
1284    }
1285 }
1286 
1287 static void
emit_noperspective_flags(struct v3dv_job * job,int varying_offset,uint32_t varyings,enum V3DX (Varying_Flags_Action)lower,enum V3DX (Varying_Flags_Action)higher)1288 emit_noperspective_flags(struct v3dv_job *job,
1289                          int varying_offset,
1290                          uint32_t varyings,
1291                          enum V3DX(Varying_Flags_Action) lower,
1292                          enum V3DX(Varying_Flags_Action) higher)
1293 {
1294    v3dv_cl_ensure_space_with_branch(&job->bcl,
1295                                     cl_packet_length(NON_PERSPECTIVE_FLAGS));
1296    v3dv_return_if_oom(NULL, job);
1297 
1298    cl_emit(&job->bcl, NON_PERSPECTIVE_FLAGS, flags) {
1299       flags.varying_offset_v0 = varying_offset;
1300       flags.non_perspective_flags_for_varyings_v024 = varyings;
1301       flags.action_for_non_perspective_flags_of_lower_numbered_varyings = lower;
1302       flags.action_for_non_perspective_flags_of_higher_numbered_varyings = higher;
1303    }
1304 }
1305 
1306 static void
1307 emit_centroid_flags(struct v3dv_job *job,
1308                     int varying_offset,
1309                     uint32_t varyings,
1310                     enum V3DX(Varying_Flags_Action) lower,
1311                     enum V3DX(Varying_Flags_Action) higher)
1312 {
1313    v3dv_cl_ensure_space_with_branch(&job->bcl,
1314                                     cl_packet_length(CENTROID_FLAGS));
1315    v3dv_return_if_oom(NULL, job);
1316 
1317    cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
1318       flags.varying_offset_v0 = varying_offset;
1319       flags.centroid_flags_for_varyings_v024 = varyings;
1320       flags.action_for_centroid_flags_of_lower_numbered_varyings = lower;
1321       flags.action_for_centroid_flags_of_higher_numbered_varyings = higher;
1322    }
1323 }
1324 
1325 static bool
1326 emit_varying_flags(struct v3dv_job *job,
1327                    uint32_t num_flags,
1328                    const uint32_t *flags,
1329                    void (*flag_emit_callback)(struct v3dv_job *job,
1330                                               int varying_offset,
1331                                               uint32_t flags,
1332                                               enum V3DX(Varying_Flags_Action) lower,
1333                                               enum V3DX(Varying_Flags_Action) higher))
1334 {
1335    bool emitted_any = false;
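   /* The first packet emitted zeroes the flags for varying groups outside
    * the one it covers (including lower-numbered groups when it is not the
    * first group); any later packets leave previously emitted flags
    * unchanged. That is what the lower/higher action arguments encode.
    */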
1336    for (int i = 0; i < num_flags; i++) {
1337       if (!flags[i])
1338          continue;
1339 
1340       if (emitted_any) {
1341          flag_emit_callback(job, i, flags[i],
1342                             V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1343                             V3D_VARYING_FLAGS_ACTION_UNCHANGED);
1344       } else if (i == 0) {
1345          flag_emit_callback(job, i, flags[i],
1346                             V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1347                             V3D_VARYING_FLAGS_ACTION_ZEROED);
1348       } else {
1349          flag_emit_callback(job, i, flags[i],
1350                             V3D_VARYING_FLAGS_ACTION_ZEROED,
1351                             V3D_VARYING_FLAGS_ACTION_ZEROED);
1352       }
1353 
1354       emitted_any = true;
1355    }
1356 
1357    return emitted_any;
1358 }
1359 
1360 void
1361 v3dX(cmd_buffer_emit_varyings_state)(struct v3dv_cmd_buffer *cmd_buffer)
1362 {
1363    struct v3dv_job *job = cmd_buffer->state.job;
1364    struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1365 
1366    struct v3d_fs_prog_data *prog_data_fs =
1367       pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
1368 
1369    const uint32_t num_flags =
1370       ARRAY_SIZE(prog_data_fs->flat_shade_flags);
1371    const uint32_t *flat_shade_flags = prog_data_fs->flat_shade_flags;
1372    const uint32_t *noperspective_flags = prog_data_fs->noperspective_flags;
1373    const uint32_t *centroid_flags = prog_data_fs->centroid_flags;
1374 
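   /* When a pipeline uses none of these flags we still emit the
    * corresponding ZERO_ALL_* packet, presumably so that flags programmed by
    * a previous pipeline in the same job cannot leak into this draw.
    */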
1375    if (!emit_varying_flags(job, num_flags, flat_shade_flags,
1376                            emit_flat_shade_flags)) {
1377       v3dv_cl_ensure_space_with_branch(
1378          &job->bcl, cl_packet_length(ZERO_ALL_FLAT_SHADE_FLAGS));
1379       v3dv_return_if_oom(cmd_buffer, NULL);
1380 
1381       cl_emit(&job->bcl, ZERO_ALL_FLAT_SHADE_FLAGS, flags);
1382    }
1383 
1384    if (!emit_varying_flags(job, num_flags, noperspective_flags,
1385                            emit_noperspective_flags)) {
1386       v3dv_cl_ensure_space_with_branch(
1387          &job->bcl, cl_packet_length(ZERO_ALL_NON_PERSPECTIVE_FLAGS));
1388       v3dv_return_if_oom(cmd_buffer, NULL);
1389 
1390       cl_emit(&job->bcl, ZERO_ALL_NON_PERSPECTIVE_FLAGS, flags);
1391    }
1392 
1393    if (!emit_varying_flags(job, num_flags, centroid_flags,
1394                            emit_centroid_flags)) {
1395       v3dv_cl_ensure_space_with_branch(
1396          &job->bcl, cl_packet_length(ZERO_ALL_CENTROID_FLAGS));
1397       v3dv_return_if_oom(cmd_buffer, NULL);
1398 
1399       cl_emit(&job->bcl, ZERO_ALL_CENTROID_FLAGS, flags);
1400    }
1401 }
1402 
1403 static void
1404 job_update_ez_state(struct v3dv_job *job,
1405                     struct v3dv_pipeline *pipeline,
1406                     struct v3dv_cmd_buffer *cmd_buffer)
1407 {
1408    /* If first_ez_state is V3D_EZ_DISABLED it means that we have already
1409     * determined that we should disable EZ completely for all draw calls in
1410     * this job. This will cause us to disable EZ for the entire job in the
1411     * Tile Rendering Mode RCL packet and when we do that we need to make sure
1412     * we never emit a draw call in the job with EZ enabled in the CFG_BITS
1413     * packet, so ez_state must also be V3D_EZ_DISABLED.
1414     */
1415    if (job->first_ez_state == V3D_EZ_DISABLED) {
1416       assert(job->ez_state == V3D_EZ_DISABLED);
1417       return;
1418    }
1419 
1420    /* This is part of the pre draw call handling, so we should be inside a
1421     * render pass.
1422     */
1423    assert(cmd_buffer->state.pass);
1424 
1425    /* If this is the first time we update EZ state for this job we first check
1426     * if there is anything that requires disabling it completely for the entire
1427     * job (based on state that is not related to the current draw call and
1428     * pipeline state).
1429     */
1430    if (!job->decided_global_ez_enable) {
1431       job->decided_global_ez_enable = true;
1432 
1433       struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1434       assert(state->subpass_idx < state->pass->subpass_count);
1435       struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
1436       if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
1437          job->first_ez_state = V3D_EZ_DISABLED;
1438          job->ez_state = V3D_EZ_DISABLED;
1439          return;
1440       }
1441 
1442       /* GFXH-1918: the early-z buffer may load incorrect depth values
1443        * if the frame has odd width or height.
1444        *
1445        * So we need to disable EZ in this case.
1446        */
1447       const struct v3dv_render_pass_attachment *ds_attachment =
1448          &state->pass->attachments[subpass->ds_attachment.attachment];
1449 
1450       const VkImageAspectFlags ds_aspects =
1451          vk_format_aspects(ds_attachment->desc.format);
1452 
1453       bool needs_depth_load =
1454          check_needs_load(state,
1455                           ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
1456                           ds_attachment->first_subpass,
1457                           ds_attachment->desc.loadOp);
1458 
1459       if (needs_depth_load) {
1460          struct v3dv_framebuffer *fb = state->framebuffer;
1461 
1462          if (!fb) {
1463             assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1464             perf_debug("Loading depth aspect in a secondary command buffer "
1465                        "without framebuffer info disables early-z tests.\n");
1466             job->first_ez_state = V3D_EZ_DISABLED;
1467             job->ez_state = V3D_EZ_DISABLED;
1468             return;
1469          }
1470 
1471          if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
1472             perf_debug("Loading depth aspect for framebuffer with odd width "
1473                        "or height disables early-Z tests.\n");
1474             job->first_ez_state = V3D_EZ_DISABLED;
1475             job->ez_state = V3D_EZ_DISABLED;
1476             return;
1477          }
1478       }
1479    }
1480 
1481    /* Otherwise, we can decide to selectively enable or disable EZ for draw
1482     * calls using the CFG_BITS packet based on the bound pipeline state.
1483     */
1484 
1485    /* If the FS writes Z, then it may update against the chosen EZ direction */
1486    struct v3dv_shader_variant *fs_variant =
1487       pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1488    if (fs_variant->prog_data.fs->writes_z &&
1489        !fs_variant->prog_data.fs->writes_z_from_fep) {
1490       job->ez_state = V3D_EZ_DISABLED;
1491       return;
1492    }
1493 
1494    switch (pipeline->ez_state) {
1495    case V3D_EZ_UNDECIDED:
1496       /* If the pipeline didn't pick a direction but didn't disable, then go
1497        * along with the current EZ state. This allows EZ optimization for Z
1498        * func == EQUAL or NEVER.
1499        */
1500       break;
1501 
1502    case V3D_EZ_LT_LE:
1503    case V3D_EZ_GT_GE:
1504       /* If the pipeline picked a direction, then it needs to match the current
1505        * direction if we've decided on one.
1506        */
1507       if (job->ez_state == V3D_EZ_UNDECIDED)
1508          job->ez_state = pipeline->ez_state;
1509       else if (job->ez_state != pipeline->ez_state)
1510          job->ez_state = V3D_EZ_DISABLED;
1511       break;
1512 
1513    case V3D_EZ_DISABLED:
1514       /* If the pipeline disables EZ because of a bad Z func or stencil
1515        * operation, then we can't do any more EZ in this frame.
1516        */
1517       job->ez_state = V3D_EZ_DISABLED;
1518       break;
1519    }
1520 
1521    if (job->first_ez_state == V3D_EZ_UNDECIDED &&
1522        job->ez_state != V3D_EZ_DISABLED) {
1523       job->first_ez_state = job->ez_state;
1524    }
1525 }
1526 
1527 void
1528 v3dX(cmd_buffer_emit_configuration_bits)(struct v3dv_cmd_buffer *cmd_buffer)
1529 {
1530    struct v3dv_job *job = cmd_buffer->state.job;
1531    assert(job);
1532 
1533    struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1534    assert(pipeline);
1535 
1536    job_update_ez_state(job, pipeline, cmd_buffer);
1537 
1538    v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CFG_BITS));
1539    v3dv_return_if_oom(cmd_buffer, NULL);
1540 
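   /* Most of CFG_BITS is prepacked at pipeline creation time
    * (pipeline->cfg_bits); here we only patch the early-Z fields, since they
    * depend on the job-wide EZ decision made in job_update_ez_state() above.
    */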
1541    cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
1542       config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
1543       config.early_z_updates_enable = config.early_z_enable &&
1544          pipeline->z_updates_enable;
1545    }
1546 }
1547 
1548 void
1549 v3dX(cmd_buffer_emit_occlusion_query)(struct v3dv_cmd_buffer *cmd_buffer)
1550 {
1551    struct v3dv_job *job = cmd_buffer->state.job;
1552    assert(job);
1553 
1554    v3dv_cl_ensure_space_with_branch(&job->bcl,
1555                                     cl_packet_length(OCCLUSION_QUERY_COUNTER));
1556    v3dv_return_if_oom(cmd_buffer, NULL);
1557 
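   /* If there is no active query the counter address is left as zero, which
    * presumably tells the hardware to stop updating any occlusion counter
    * from this point in the command list.
    */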
1558    cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
1559       if (cmd_buffer->state.query.active_query.bo) {
1560          counter.address =
1561             v3dv_cl_address(cmd_buffer->state.query.active_query.bo,
1562                             cmd_buffer->state.query.active_query.offset);
1563       }
1564    }
1565 
1566    cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1567 }
1568 
1569 static struct v3dv_job *
1570 cmd_buffer_subpass_split_for_barrier(struct v3dv_cmd_buffer *cmd_buffer,
1571                                      bool is_bcl_barrier)
1572 {
1573    assert(cmd_buffer->state.subpass_idx != -1);
1574    v3dv_cmd_buffer_finish_job(cmd_buffer);
1575    struct v3dv_job *job =
1576       v3dv_cmd_buffer_subpass_resume(cmd_buffer,
1577                                      cmd_buffer->state.subpass_idx);
1578    if (!job)
1579       return NULL;
1580 
1581    job->serialize = true;
1582    job->needs_bcl_sync = is_bcl_barrier;
1583    return job;
1584 }
1585 
1586 static void
1587 cmd_buffer_copy_secondary_end_query_state(struct v3dv_cmd_buffer *primary,
1588                                           struct v3dv_cmd_buffer *secondary)
1589 {
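   /* vkCmdEndQuery recorded in a secondary is tracked as CPU-side "end query"
    * state; append the secondary's pending entries to the primary's array so
    * they are processed when the primary's current job is finished.
    */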
1590    struct v3dv_cmd_buffer_state *p_state = &primary->state;
1591    struct v3dv_cmd_buffer_state *s_state = &secondary->state;
1592 
1593    const uint32_t total_state_count =
1594       p_state->query.end.used_count + s_state->query.end.used_count;
1595    v3dv_cmd_buffer_ensure_array_state(primary,
1596                                       sizeof(struct v3dv_end_query_cpu_job_info),
1597                                       total_state_count,
1598                                       &p_state->query.end.alloc_count,
1599                                       (void **) &p_state->query.end.states);
1600    v3dv_return_if_oom(primary, NULL);
1601 
1602    for (uint32_t i = 0; i < s_state->query.end.used_count; i++) {
1603       const struct v3dv_end_query_cpu_job_info *s_qstate =
1604          &secondary->state.query.end.states[i];
1605 
1606       struct v3dv_end_query_cpu_job_info *p_qstate =
1607          &p_state->query.end.states[p_state->query.end.used_count++];
1608 
1609       p_qstate->pool = s_qstate->pool;
1610       p_qstate->query = s_qstate->query;
1611    }
1612 }
1613 
1614 void
1615 v3dX(cmd_buffer_execute_inside_pass)(struct v3dv_cmd_buffer *primary,
1616                                      uint32_t cmd_buffer_count,
1617                                      const VkCommandBuffer *cmd_buffers)
1618 {
1619    assert(primary->state.job);
1620 
1621    /* Emit occlusion query state if needed so the draw calls inside our
1622     * secondaries update the counters.
1623     */
1624    bool has_occlusion_query =
1625       primary->state.dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1626    if (has_occlusion_query)
1627       v3dX(cmd_buffer_emit_occlusion_query)(primary);
1628 
1629    /* FIXME: if our primary job tiling doesn't enable MSAA but any of the
1630     * pipelines used by the secondaries do, we need to re-start the primary
1631     * job to enable MSAA. See cmd_buffer_restart_job_for_msaa_if_needed.
1632     */
1633    bool pending_barrier = false;
1634    bool pending_bcl_barrier = false;
1635    for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1636       V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
1637 
1638       assert(secondary->usage_flags &
1639              VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
1640 
1641       list_for_each_entry(struct v3dv_job, secondary_job,
1642                           &secondary->jobs, list_link) {
1643          if (secondary_job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
1644             /* If the job is a CL, then we branch to it from the primary BCL.
1645              * In this case the secondary's BCL is finished with a
1646              * RETURN_FROM_SUB_LIST command to return back to the primary BCL
1647              * once we are done executing it.
1648              */
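            /* Note: secondary jobs recorded inside a render pass only build
             * a BCL; the RCL for the pass is generated by the primary when
             * the subpass ends, which is why the secondary's RCL is expected
             * to be empty here.
             */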
1649             assert(v3dv_cl_offset(&secondary_job->rcl) == 0);
1650             assert(secondary_job->bcl.bo);
1651 
1652             /* Sanity check that secondary BCL ends with RETURN_FROM_SUB_LIST */
1653             STATIC_ASSERT(cl_packet_length(RETURN_FROM_SUB_LIST) == 1);
1654             assert(v3dv_cl_offset(&secondary_job->bcl) >= 1);
1655             assert(*(((uint8_t *)secondary_job->bcl.next) - 1) ==
1656                    V3DX(RETURN_FROM_SUB_LIST_opcode));
1657 
1658             /* If this secondary has any barriers (or we had any pending barrier
1659              * to apply), then we can't just branch to it from the primary; we
1660              * need to split the primary to create a new job that can consume
1661              * the barriers first.
1662              *
1663              * FIXME: in this case, maybe just copy the secondary BCL without
1664              * the RETURN_FROM_SUB_LIST into the primary job to skip the
1665              * branch?
1666              */
1667             struct v3dv_job *primary_job = primary->state.job;
1668             if (!primary_job || secondary_job->serialize || pending_barrier) {
1669                const bool needs_bcl_barrier =
1670                   secondary_job->needs_bcl_sync || pending_bcl_barrier;
1671                primary_job =
1672                   cmd_buffer_subpass_split_for_barrier(primary,
1673                                                        needs_bcl_barrier);
1674                v3dv_return_if_oom(primary, NULL);
1675 
1676                /* Since we have created a new primary we need to re-emit
1677                 * occlusion query state.
1678                 */
1679                if (has_occlusion_query)
1680                   v3dX(cmd_buffer_emit_occlusion_query)(primary);
1681             }
1682 
1683             /* Make sure our primary job has all required BO references */
1684             set_foreach(secondary_job->bos, entry) {
1685                struct v3dv_bo *bo = (struct v3dv_bo *)entry->key;
1686                v3dv_job_add_bo(primary_job, bo);
1687             }
1688 
1689             /* Emit required branch instructions. We expect each of these
1690              * to end with a corresponding 'return from sub list' item.
1691              */
1692             list_for_each_entry(struct v3dv_bo, bcl_bo,
1693                                 &secondary_job->bcl.bo_list, list_link) {
1694                v3dv_cl_ensure_space_with_branch(&primary_job->bcl,
1695                                                 cl_packet_length(BRANCH_TO_SUB_LIST));
1696                v3dv_return_if_oom(primary, NULL);
1697                cl_emit(&primary_job->bcl, BRANCH_TO_SUB_LIST, branch) {
1698                   branch.address = v3dv_cl_address(bcl_bo, 0);
1699                }
1700             }
1701 
1702             primary_job->tmu_dirty_rcl |= secondary_job->tmu_dirty_rcl;
1703          } else {
1704             /* This is a regular job (CPU or GPU), so just finish the current
1705              * primary job (if any) and then add the secondary job to the
1706              * primary's job list right after it.
1707              */
1708             v3dv_cmd_buffer_finish_job(primary);
1709             v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
1710             if (pending_barrier) {
1711                secondary_job->serialize = true;
1712                if (pending_bcl_barrier)
1713                   secondary_job->needs_bcl_sync = true;
1714             }
1715          }
1716 
1717          pending_barrier = false;
1718          pending_bcl_barrier = false;
1719       }
1720 
1721       /* If the secondary has recorded any vkCmdEndQuery commands, we need to
1722        * copy this state to the primary so it is processed properly when the
1723        * current primary job is finished.
1724        */
1725       cmd_buffer_copy_secondary_end_query_state(primary, secondary);
1726 
1727       /* If this secondary had any pending barrier state we will need that
1728        * barrier state consumed with whatever comes next in the primary.
1729        */
1730       assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
1731       pending_barrier = secondary->state.has_barrier;
1732       pending_bcl_barrier = secondary->state.has_bcl_barrier;
1733    }
1734 
1735    if (pending_barrier) {
1736       primary->state.has_barrier = true;
1737       primary->state.has_bcl_barrier |= pending_bcl_barrier;
1738    }
1739 }
1740 
1741 static void
1742 emit_gs_shader_state_record(struct v3dv_job *job,
1743                             struct v3dv_bo *assembly_bo,
1744                             struct v3dv_shader_variant *gs_bin,
1745                             struct v3dv_cl_reloc gs_bin_uniforms,
1746                             struct v3dv_shader_variant *gs,
1747                             struct v3dv_cl_reloc gs_render_uniforms)
1748 {
1749    cl_emit(&job->indirect, GEOMETRY_SHADER_STATE_RECORD, shader) {
1750       shader.geometry_bin_mode_shader_code_address =
1751          v3dv_cl_address(assembly_bo, gs_bin->assembly_offset);
1752       shader.geometry_bin_mode_shader_4_way_threadable =
1753          gs_bin->prog_data.gs->base.threads == 4;
1754       shader.geometry_bin_mode_shader_start_in_final_thread_section =
1755          gs_bin->prog_data.gs->base.single_seg;
1756       shader.geometry_bin_mode_shader_propagate_nans = true;
1757       shader.geometry_bin_mode_shader_uniforms_address =
1758          gs_bin_uniforms;
1759 
1760       shader.geometry_render_mode_shader_code_address =
1761          v3dv_cl_address(assembly_bo, gs->assembly_offset);
1762       shader.geometry_render_mode_shader_4_way_threadable =
1763          gs->prog_data.gs->base.threads == 4;
1764       shader.geometry_render_mode_shader_start_in_final_thread_section =
1765          gs->prog_data.gs->base.single_seg;
1766       shader.geometry_render_mode_shader_propagate_nans = true;
1767       shader.geometry_render_mode_shader_uniforms_address =
1768          gs_render_uniforms;
1769    }
1770 }
1771 
1772 static uint8_t
1773 v3d_gs_output_primitive(enum shader_prim prim_type)
1774 {
1775     switch (prim_type) {
1776     case SHADER_PRIM_POINTS:
1777         return GEOMETRY_SHADER_POINTS;
1778     case SHADER_PRIM_LINE_STRIP:
1779         return GEOMETRY_SHADER_LINE_STRIP;
1780     case SHADER_PRIM_TRIANGLE_STRIP:
1781         return GEOMETRY_SHADER_TRI_STRIP;
1782     default:
1783         unreachable("Unsupported primitive type");
1784     }
1785 }
1786 
1787 static void
1788 emit_tes_gs_common_params(struct v3dv_job *job,
1789                           uint8_t gs_out_prim_type,
1790                           uint8_t gs_num_invocations)
1791 {
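   /* Only the geometry shader fields carry real pipeline state here; the
    * tessellation fields of this combined packet are hard-coded to benign
    * defaults since this path does not use tessellation.
    */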
1792    cl_emit(&job->indirect, TESSELLATION_GEOMETRY_COMMON_PARAMS, shader) {
1793       shader.tessellation_type = TESSELLATION_TYPE_TRIANGLE;
1794       shader.tessellation_point_mode = false;
1795       shader.tessellation_edge_spacing = TESSELLATION_EDGE_SPACING_EVEN;
1796       shader.tessellation_clockwise = true;
1797       shader.tessellation_invocations = 1;
1798 
1799       shader.geometry_shader_output_format =
1800          v3d_gs_output_primitive(gs_out_prim_type);
1801       shader.geometry_shader_instances = gs_num_invocations & 0x1F;
1802    }
1803 }
1804 
1805 static uint8_t
1806 simd_width_to_gs_pack_mode(uint32_t width)
1807 {
1808    switch (width) {
1809    case 16:
1810       return V3D_PACK_MODE_16_WAY;
1811    case 8:
1812       return V3D_PACK_MODE_8_WAY;
1813    case 4:
1814       return V3D_PACK_MODE_4_WAY;
1815    case 1:
1816       return V3D_PACK_MODE_1_WAY;
1817    default:
1818       unreachable("Invalid SIMD width");
1819    }
1820 }
1821 
1822 static void
1823 emit_tes_gs_shader_params(struct v3dv_job *job,
1824                           uint32_t gs_simd,
1825                           uint32_t gs_vpm_output_size,
1826                           uint32_t gs_max_vpm_input_size_per_batch)
1827 {
1828    cl_emit(&job->indirect, TESSELLATION_GEOMETRY_SHADER_PARAMS, shader) {
1829       shader.tcs_batch_flush_mode = V3D_TCS_FLUSH_MODE_FULLY_PACKED;
1830       shader.per_patch_data_column_depth = 1;
1831       shader.tcs_output_segment_size_in_sectors = 1;
1832       shader.tcs_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
1833       shader.tes_output_segment_size_in_sectors = 1;
1834       shader.tes_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
1835       shader.gs_output_segment_size_in_sectors = gs_vpm_output_size;
1836       shader.gs_output_segment_pack_mode =
1837          simd_width_to_gs_pack_mode(gs_simd);
1838       shader.tbg_max_patches_per_tcs_batch = 1;
1839       shader.tbg_max_extra_vertex_segs_for_patches_after_first = 0;
1840       shader.tbg_min_tcs_output_segments_required_in_play = 1;
1841       shader.tbg_min_per_patch_data_segments_required_in_play = 1;
1842       shader.tpg_max_patches_per_tes_batch = 1;
1843       shader.tpg_max_vertex_segments_per_tes_batch = 0;
1844       shader.tpg_max_tcs_output_segments_per_tes_batch = 1;
1845       shader.tpg_min_tes_output_segments_required_in_play = 1;
1846       shader.gbg_max_tes_output_vertex_segments_per_gs_batch =
1847          gs_max_vpm_input_size_per_batch;
1848       shader.gbg_min_gs_output_segments_required_in_play = 1;
1849    }
1850 }
1851 
1852 void
1853 v3dX(cmd_buffer_emit_gl_shader_state)(struct v3dv_cmd_buffer *cmd_buffer)
1854 {
1855    struct v3dv_job *job = cmd_buffer->state.job;
1856    assert(job);
1857 
1858    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1859    struct v3dv_pipeline *pipeline = state->gfx.pipeline;
1860    assert(pipeline);
1861 
1862    struct v3dv_shader_variant *vs_variant =
1863       pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
1864    struct v3d_vs_prog_data *prog_data_vs = vs_variant->prog_data.vs;
1865 
1866    struct v3dv_shader_variant *vs_bin_variant =
1867       pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
1868    struct v3d_vs_prog_data *prog_data_vs_bin = vs_bin_variant->prog_data.vs;
1869 
1870    struct v3dv_shader_variant *fs_variant =
1871       pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1872    struct v3d_fs_prog_data *prog_data_fs = fs_variant->prog_data.fs;
1873 
1874    struct v3dv_shader_variant *gs_variant = NULL;
1875    struct v3dv_shader_variant *gs_bin_variant = NULL;
1876    struct v3d_gs_prog_data *prog_data_gs = NULL;
1877    struct v3d_gs_prog_data *prog_data_gs_bin = NULL;
1878    if (pipeline->has_gs) {
1879       gs_variant =
1880          pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
1881       prog_data_gs = gs_variant->prog_data.gs;
1882 
1883       gs_bin_variant =
1884          pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
1885       prog_data_gs_bin = gs_bin_variant->prog_data.gs;
1886    }
1887 
1888    /* Update the cache dirty flag based on the shader progs data */
1889    job->tmu_dirty_rcl |= prog_data_vs_bin->base.tmu_dirty_rcl;
1890    job->tmu_dirty_rcl |= prog_data_vs->base.tmu_dirty_rcl;
1891    job->tmu_dirty_rcl |= prog_data_fs->base.tmu_dirty_rcl;
1892    if (pipeline->has_gs) {
1893       job->tmu_dirty_rcl |= prog_data_gs_bin->base.tmu_dirty_rcl;
1894       job->tmu_dirty_rcl |= prog_data_gs->base.tmu_dirty_rcl;
1895    }
1896 
1897    /* See GFXH-930 workaround below */
1898    uint32_t num_elements_to_emit = MAX2(pipeline->va_count, 1);
1899 
1900    uint32_t shader_state_record_length =
1901       cl_packet_length(GL_SHADER_STATE_RECORD);
1902    if (pipeline->has_gs) {
1903       shader_state_record_length +=
1904          cl_packet_length(GEOMETRY_SHADER_STATE_RECORD) +
1905          cl_packet_length(TESSELLATION_GEOMETRY_COMMON_PARAMS) +
1906          2 * cl_packet_length(TESSELLATION_GEOMETRY_SHADER_PARAMS);
1907    }
1908 
1909    uint32_t shader_rec_offset =
1910       v3dv_cl_ensure_space(&job->indirect,
1911                            shader_state_record_length +
1912                            num_elements_to_emit *
1913                            cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
1914                            32);
1915    v3dv_return_if_oom(cmd_buffer, NULL);
1916 
1917    struct v3dv_bo *assembly_bo = pipeline->shared_data->assembly_bo;
1918 
1919    if (pipeline->has_gs) {
1920       emit_gs_shader_state_record(job,
1921                                   assembly_bo,
1922                                   gs_bin_variant,
1923                                   cmd_buffer->state.uniforms.gs_bin,
1924                                   gs_variant,
1925                                   cmd_buffer->state.uniforms.gs);
1926 
1927       emit_tes_gs_common_params(job,
1928                                 prog_data_gs->out_prim_type,
1929                                 prog_data_gs->num_invocations);
1930 
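      /* The shader params packet is emitted twice: first with the
       * binning-mode VPM configuration (vpm_cfg_bin) and then with the
       * rendering-mode one (vpm_cfg), matching the bin/render split of the
       * GS state record above.
       */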
1931       emit_tes_gs_shader_params(job,
1932                                 pipeline->vpm_cfg_bin.gs_width,
1933                                 pipeline->vpm_cfg_bin.Gd,
1934                                 pipeline->vpm_cfg_bin.Gv);
1935 
1936       emit_tes_gs_shader_params(job,
1937                                 pipeline->vpm_cfg.gs_width,
1938                                 pipeline->vpm_cfg.Gd,
1939                                 pipeline->vpm_cfg.Gv);
1940    }
1941 
1942    struct v3dv_bo *default_attribute_values =
1943       pipeline->default_attribute_values != NULL ?
1944       pipeline->default_attribute_values :
1945       pipeline->device->default_attribute_float;
1946 
1947    cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_RECORD,
1948                           pipeline->shader_state_record, shader) {
1949 
1950       /* FIXME: we are setting these values here and during the
1951        * prepacking. This is because both cl_emit_with_prepacked and v3dvx_pack
1952        * assert on minimum values for these. It would be good to get
1953        * v3dvx_pack to assert on the final value if possible.
1954        */
1955       shader.min_coord_shader_input_segments_required_in_play =
1956          pipeline->vpm_cfg_bin.As;
1957       shader.min_vertex_shader_input_segments_required_in_play =
1958          pipeline->vpm_cfg.As;
1959 
1960       shader.coordinate_shader_code_address =
1961          v3dv_cl_address(assembly_bo, vs_bin_variant->assembly_offset);
1962       shader.vertex_shader_code_address =
1963          v3dv_cl_address(assembly_bo, vs_variant->assembly_offset);
1964       shader.fragment_shader_code_address =
1965          v3dv_cl_address(assembly_bo, fs_variant->assembly_offset);
1966 
1967       shader.coordinate_shader_uniforms_address = cmd_buffer->state.uniforms.vs_bin;
1968       shader.vertex_shader_uniforms_address = cmd_buffer->state.uniforms.vs;
1969       shader.fragment_shader_uniforms_address = cmd_buffer->state.uniforms.fs;
1970 
1971       shader.address_of_default_attribute_values =
1972          v3dv_cl_address(default_attribute_values, 0);
1973 
1974       shader.any_shader_reads_hardware_written_primitive_id =
1975          (pipeline->has_gs && prog_data_gs->uses_pid) || prog_data_fs->uses_pid;
1976       shader.insert_primitive_id_as_first_varying_to_fragment_shader =
1977          !pipeline->has_gs && prog_data_fs->uses_pid;
1978    }
1979 
1980    /* Upload vertex element attributes (SHADER_STATE_ATTRIBUTE_RECORD) */
1981    bool cs_loaded_any = false;
1982    const bool cs_uses_builtins = prog_data_vs_bin->uses_iid ||
1983                                  prog_data_vs_bin->uses_biid ||
1984                                  prog_data_vs_bin->uses_vid;
1985    const uint32_t packet_length =
1986       cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
1987 
1988    uint32_t emitted_va_count = 0;
1989    for (uint32_t i = 0; emitted_va_count < pipeline->va_count; i++) {
1990       assert(i < MAX_VERTEX_ATTRIBS);
1991 
1992       if (pipeline->va[i].vk_format == VK_FORMAT_UNDEFINED)
1993          continue;
1994 
1995       const uint32_t binding = pipeline->va[i].binding;
1996 
1997       /* We store each vertex attribute in the array using its driver location
1998        * as index.
1999        */
2000       const uint32_t location = i;
2001 
2002       struct v3dv_vertex_binding *c_vb = &cmd_buffer->state.vertex_bindings[binding];
2003 
2004       cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD,
2005                              &pipeline->vertex_attrs[i * packet_length], attr) {
2006 
2007          assert(c_vb->buffer->mem->bo);
2008          attr.address = v3dv_cl_address(c_vb->buffer->mem->bo,
2009                                         c_vb->buffer->mem_offset +
2010                                         pipeline->va[i].offset +
2011                                         c_vb->offset);
2012 
2013          attr.number_of_values_read_by_coordinate_shader =
2014             prog_data_vs_bin->vattr_sizes[location];
2015          attr.number_of_values_read_by_vertex_shader =
2016             prog_data_vs->vattr_sizes[location];
2017 
2018          /* GFXH-930: At least one attribute must be enabled and read by CS
2019           * and VS.  If we have attributes being consumed by the VS but not
2020           * the CS, then set up a dummy load of the last attribute into the
2021           * CS's VPM inputs.  (Since CS is just dead-code-elimination compared
2022           * to VS, we can't have CS loading but not VS).
2023           *
2024           * GFXH-1602: first attribute must be active if using builtins.
2025           */
2026          if (prog_data_vs_bin->vattr_sizes[location])
2027             cs_loaded_any = true;
2028 
2029          if (i == 0 && cs_uses_builtins && !cs_loaded_any) {
2030             attr.number_of_values_read_by_coordinate_shader = 1;
2031             cs_loaded_any = true;
2032          } else if (i == pipeline->va_count - 1 && !cs_loaded_any) {
2033             attr.number_of_values_read_by_coordinate_shader = 1;
2034             cs_loaded_any = true;
2035          }
2036 
2037          attr.maximum_index = 0xffffff;
2038       }
2039 
2040       emitted_va_count++;
2041    }
2042 
2043    if (pipeline->va_count == 0) {
2044       /* GFXH-930: At least one attribute must be enabled and read
2045        * by CS and VS.  If we have no attributes being consumed by
2046        * the shader, set up a dummy to be loaded into the VPM.
2047        */
2048       cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
2049          /* Valid address of data whose value will be unused. */
2050          attr.address = v3dv_cl_address(job->indirect.bo, 0);
2051 
2052          attr.type = ATTRIBUTE_FLOAT;
2053          attr.stride = 0;
2054          attr.vec_size = 1;
2055 
2056          attr.number_of_values_read_by_coordinate_shader = 1;
2057          attr.number_of_values_read_by_vertex_shader = 1;
2058       }
2059    }
2060 
2061    if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
2062       v3dv_cl_ensure_space_with_branch(&job->bcl,
2063                                        sizeof(pipeline->vcm_cache_size));
2064       v3dv_return_if_oom(cmd_buffer, NULL);
2065 
2066       cl_emit_prepacked(&job->bcl, &pipeline->vcm_cache_size);
2067    }
2068 
2069    v3dv_cl_ensure_space_with_branch(&job->bcl,
2070                                     cl_packet_length(GL_SHADER_STATE));
2071    v3dv_return_if_oom(cmd_buffer, NULL);
2072 
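   /* When a geometry shader is present the shader record includes the extra
    * GS/TES records emitted above, and a different packet
    * (GL_SHADER_STATE_INCLUDING_GS) is used to point the BCL at it.
    */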
2073    if (pipeline->has_gs) {
2074       cl_emit(&job->bcl, GL_SHADER_STATE_INCLUDING_GS, state) {
2075          state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
2076          state.number_of_attribute_arrays = num_elements_to_emit;
2077       }
2078    } else {
2079       cl_emit(&job->bcl, GL_SHADER_STATE, state) {
2080          state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
2081          state.number_of_attribute_arrays = num_elements_to_emit;
2082       }
2083    }
2084 
2085    cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_VERTEX_BUFFER |
2086                                 V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
2087                                 V3DV_CMD_DIRTY_PUSH_CONSTANTS);
2088    cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
2089    cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
2090 }
2091 
2092 /* FIXME: C&P from v3dx_draw. Refactor to common place? */
2093 static uint32_t
2094 v3d_hw_prim_type(enum pipe_prim_type prim_type)
2095 {
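   /* For the basic primitive types the gallium enum values match the
    * hardware encoding directly, so they are returned unchanged; the
    * adjacency variants are remapped to start at 8.
    */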
2096    switch (prim_type) {
2097    case PIPE_PRIM_POINTS:
2098    case PIPE_PRIM_LINES:
2099    case PIPE_PRIM_LINE_LOOP:
2100    case PIPE_PRIM_LINE_STRIP:
2101    case PIPE_PRIM_TRIANGLES:
2102    case PIPE_PRIM_TRIANGLE_STRIP:
2103    case PIPE_PRIM_TRIANGLE_FAN:
2104       return prim_type;
2105 
2106    case PIPE_PRIM_LINES_ADJACENCY:
2107    case PIPE_PRIM_LINE_STRIP_ADJACENCY:
2108    case PIPE_PRIM_TRIANGLES_ADJACENCY:
2109    case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
2110       return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);
2111 
2112    default:
2113       unreachable("Unsupported primitive type");
2114    }
2115 }
2116 
2117 void
2118 v3dX(cmd_buffer_emit_draw)(struct v3dv_cmd_buffer *cmd_buffer,
2119                            struct v3dv_draw_info *info)
2120 {
2121    struct v3dv_job *job = cmd_buffer->state.job;
2122    assert(job);
2123 
2124    struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2125    struct v3dv_pipeline *pipeline = state->gfx.pipeline;
2126 
2127    assert(pipeline);
2128 
2129    uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2130 
2131    if (info->first_instance > 0) {
2132       v3dv_cl_ensure_space_with_branch(
2133          &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
2134       v3dv_return_if_oom(cmd_buffer, NULL);
2135 
2136       cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
2137          base.base_instance = info->first_instance;
2138          base.base_vertex = 0;
2139       }
2140    }
2141 
2142    if (info->instance_count > 1) {
2143       v3dv_cl_ensure_space_with_branch(
2144          &job->bcl, cl_packet_length(VERTEX_ARRAY_INSTANCED_PRIMS));
2145       v3dv_return_if_oom(cmd_buffer, NULL);
2146 
2147       cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
2148          prim.mode = hw_prim_type;
2149          prim.index_of_first_vertex = info->first_vertex;
2150          prim.number_of_instances = info->instance_count;
2151          prim.instance_length = info->vertex_count;
2152       }
2153    } else {
2154       v3dv_cl_ensure_space_with_branch(
2155          &job->bcl, cl_packet_length(VERTEX_ARRAY_PRIMS));
2156       v3dv_return_if_oom(cmd_buffer, NULL);
2157       cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
2158          prim.mode = hw_prim_type;
2159          prim.length = info->vertex_count;
2160          prim.index_of_first_vertex = info->first_vertex;
2161       }
2162    }
2163 }
2164 
2165 void
2166 v3dX(cmd_buffer_emit_index_buffer)(struct v3dv_cmd_buffer *cmd_buffer)
2167 {
2168    struct v3dv_job *job = cmd_buffer->state.job;
2169    assert(job);
2170 
2171    /* We flag all state as dirty when we create a new job, so make sure we
2172     * have a valid index buffer before attempting to emit state for it.
2173     */
2174    struct v3dv_buffer *ibuffer =
2175       v3dv_buffer_from_handle(cmd_buffer->state.index_buffer.buffer);
2176    if (ibuffer) {
2177       v3dv_cl_ensure_space_with_branch(
2178          &job->bcl, cl_packet_length(INDEX_BUFFER_SETUP));
2179       v3dv_return_if_oom(cmd_buffer, NULL);
2180 
2181       const uint32_t offset = cmd_buffer->state.index_buffer.offset;
2182       cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
2183          ib.address = v3dv_cl_address(ibuffer->mem->bo,
2184                                       ibuffer->mem_offset + offset);
2185          ib.size = ibuffer->mem->bo->size;
2186       }
2187    }
2188 
2189    cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_INDEX_BUFFER;
2190 }
2191 
2192 void
2193 v3dX(cmd_buffer_emit_draw_indexed)(struct v3dv_cmd_buffer *cmd_buffer,
2194                                    uint32_t indexCount,
2195                                    uint32_t instanceCount,
2196                                    uint32_t firstIndex,
2197                                    int32_t vertexOffset,
2198                                    uint32_t firstInstance)
2199 {
2200    struct v3dv_job *job = cmd_buffer->state.job;
2201    assert(job);
2202 
2203    const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2204    uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
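   /* index_size is 1, 2 or 4 bytes, so ffs(index_size) - 1 maps it to
    * 0, 1 or 2, which is assumed to be the hardware's index type encoding.
    */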
2205    uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
2206    uint32_t index_offset = firstIndex * cmd_buffer->state.index_buffer.index_size;
2207 
2208    if (vertexOffset != 0 || firstInstance != 0) {
2209       v3dv_cl_ensure_space_with_branch(
2210          &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
2211       v3dv_return_if_oom(cmd_buffer, NULL);
2212 
2213       cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
2214          base.base_instance = firstInstance;
2215          base.base_vertex = vertexOffset;
2216       }
2217    }
2218 
2219    if (instanceCount == 1) {
2220       v3dv_cl_ensure_space_with_branch(
2221          &job->bcl, cl_packet_length(INDEXED_PRIM_LIST));
2222       v3dv_return_if_oom(cmd_buffer, NULL);
2223 
2224       cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
2225          prim.index_type = index_type;
2226          prim.length = indexCount;
2227          prim.index_offset = index_offset;
2228          prim.mode = hw_prim_type;
2229          prim.enable_primitive_restarts = pipeline->primitive_restart;
2230       }
2231    } else if (instanceCount > 1) {
2232       v3dv_cl_ensure_space_with_branch(
2233          &job->bcl, cl_packet_length(INDEXED_INSTANCED_PRIM_LIST));
2234       v3dv_return_if_oom(cmd_buffer, NULL);
2235 
2236       cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
2237          prim.index_type = index_type;
2238          prim.index_offset = index_offset;
2239          prim.mode = hw_prim_type;
2240          prim.enable_primitive_restarts = pipeline->primitive_restart;
2241          prim.number_of_instances = instanceCount;
2242          prim.instance_length = indexCount;
2243       }
2244    }
2245 }
2246 
2247 void
2248 v3dX(cmd_buffer_emit_draw_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
2249                                     struct v3dv_buffer *buffer,
2250                                     VkDeviceSize offset,
2251                                     uint32_t drawCount,
2252                                     uint32_t stride)
2253 {
2254    struct v3dv_job *job = cmd_buffer->state.job;
2255    assert(job);
2256 
2257    const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2258    uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2259 
2260    v3dv_cl_ensure_space_with_branch(
2261       &job->bcl, cl_packet_length(INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS));
2262    v3dv_return_if_oom(cmd_buffer, NULL);
2263 
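   /* The indirect record stride is programmed in 4-byte units (hence
    * stride >> 2); Vulkan requires the stride passed to indirect draws to be
    * a multiple of 4, so nothing is lost in the shift.
    */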
2264    cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
2265       prim.mode = hw_prim_type;
2266       prim.number_of_draw_indirect_array_records = drawCount;
2267       prim.stride_in_multiples_of_4_bytes = stride >> 2;
2268       prim.address = v3dv_cl_address(buffer->mem->bo,
2269                                      buffer->mem_offset + offset);
2270    }
2271 }
2272 
2273 void
2274 v3dX(cmd_buffer_emit_indexed_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
2275                                        struct v3dv_buffer *buffer,
2276                                        VkDeviceSize offset,
2277                                        uint32_t drawCount,
2278                                        uint32_t stride)
2279 {
2280    struct v3dv_job *job = cmd_buffer->state.job;
2281    assert(job);
2282 
2283    const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2284    uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2285    uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
2286 
2287    v3dv_cl_ensure_space_with_branch(
2288       &job->bcl, cl_packet_length(INDIRECT_INDEXED_INSTANCED_PRIM_LIST));
2289    v3dv_return_if_oom(cmd_buffer, NULL);
2290 
2291    cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
2292       prim.index_type = index_type;
2293       prim.mode = hw_prim_type;
2294       prim.enable_primitive_restarts = pipeline->primitive_restart;
2295       prim.number_of_draw_indirect_indexed_records = drawCount;
2296       prim.stride_in_multiples_of_4_bytes = stride >> 2;
2297       prim.address = v3dv_cl_address(buffer->mem->bo,
2298                                      buffer->mem_offset + offset);
2299    }
2300 }
2301 
2302 void
2303 v3dX(cmd_buffer_render_pass_setup_render_target)(struct v3dv_cmd_buffer *cmd_buffer,
2304                                                  int rt,
2305                                                  uint32_t *rt_bpp,
2306                                                  uint32_t *rt_type,
2307                                                  uint32_t *rt_clamp)
2308 {
2309    const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2310 
2311    assert(state->subpass_idx < state->pass->subpass_count);
2312    const struct v3dv_subpass *subpass =
2313       &state->pass->subpasses[state->subpass_idx];
2314 
2315    if (rt >= subpass->color_count)
2316       return;
2317 
2318    struct v3dv_subpass_attachment *attachment = &subpass->color_attachments[rt];
2319    const uint32_t attachment_idx = attachment->attachment;
2320    if (attachment_idx == VK_ATTACHMENT_UNUSED)
2321       return;
2322 
2323    assert(attachment_idx < state->framebuffer->attachment_count &&
2324           attachment_idx < state->attachment_alloc_count);
2325    struct v3dv_image_view *iview = state->attachments[attachment_idx].image_view;
2326    assert(iview->vk.aspects & VK_IMAGE_ASPECT_COLOR_BIT);
2327 
2328    *rt_bpp = iview->internal_bpp;
2329    *rt_type = iview->internal_type;
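   /* The clamp mode is derived from the view format: integer formats get
    * integer clamping, sRGB formats get normalized [0,1] clamping, and
    * everything else is left unclamped.
    */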
2330    if (vk_format_is_int(iview->vk.view_format))
2331       *rt_clamp = V3D_RENDER_TARGET_CLAMP_INT;
2332    else if (vk_format_is_srgb(iview->vk.view_format))
2333       *rt_clamp = V3D_RENDER_TARGET_CLAMP_NORM;
2334    else
2335       *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
2336 }
2337