1 /*
2 * Copyright © 2021 Raspberry Pi
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "v3dv_private.h"
25 #include "broadcom/common/v3d_macros.h"
26 #include "broadcom/cle/v3dx_pack.h"
27 #include "broadcom/compiler/v3d_compiler.h"
28
29 #include "util/half_float.h"
30 #include "vulkan/util/vk_format.h"
31 #include "util/u_pack_color.h"
32
33 #include "vk_format_info.h"
34
35 void
36 v3dX(job_emit_binning_flush)(struct v3dv_job *job)
37 {
38 assert(job);
39
40 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(FLUSH));
41 v3dv_return_if_oom(NULL, job);
42
43 cl_emit(&job->bcl, FLUSH, flush);
44 }
45
46 void
47 v3dX(job_emit_binning_prolog)(struct v3dv_job *job,
48 const struct v3dv_frame_tiling *tiling,
49 uint32_t layers)
50 {
51 /* This must go before the binning mode configuration. It is
52 * required for layered framebuffers to work.
53 */
54 cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
55 config.number_of_layers = layers;
56 }
57
58 cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
59 config.width_in_pixels = tiling->width;
60 config.height_in_pixels = tiling->height;
61 config.number_of_render_targets = MAX2(tiling->render_target_count, 1);
62 config.multisample_mode_4x = tiling->msaa;
63 config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
64 }
65
66 /* There's definitely nothing in the VCD cache we want. */
67 cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);
68
69 /* "Binning mode lists must have a Start Tile Binning item (6) after
70 * any prefix state data before the binning list proper starts."
71 */
72 cl_emit(&job->bcl, START_TILE_BINNING, bin);
73 }
74
75 void
76 v3dX(cmd_buffer_end_render_pass_secondary)(struct v3dv_cmd_buffer *cmd_buffer)
77 {
78 assert(cmd_buffer->state.job);
79 v3dv_cl_ensure_space_with_branch(&cmd_buffer->state.job->bcl,
80 cl_packet_length(RETURN_FROM_SUB_LIST));
81 v3dv_return_if_oom(cmd_buffer, NULL);
82 cl_emit(&cmd_buffer->state.job->bcl, RETURN_FROM_SUB_LIST, ret);
83 }
84
85 void
86 v3dX(job_emit_clip_window)(struct v3dv_job *job, const VkRect2D *rect)
87 {
88 assert(job);
89
90 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CLIP_WINDOW));
91 v3dv_return_if_oom(NULL, job);
92
93 cl_emit(&job->bcl, CLIP_WINDOW, clip) {
94 clip.clip_window_left_pixel_coordinate = rect->offset.x;
95 clip.clip_window_bottom_pixel_coordinate = rect->offset.y;
96 clip.clip_window_width_in_pixels = rect->extent.width;
97 clip.clip_window_height_in_pixels = rect->extent.height;
98 }
99 }
100
101 static void
102 cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
103 struct v3dv_cl *cl,
104 struct v3dv_image_view *iview,
105 uint32_t layer,
106 uint32_t buffer)
107 {
108 const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
109 const struct v3d_resource_slice *slice =
110 &image->slices[iview->vk.base_mip_level];
111 uint32_t layer_offset =
112 v3dv_layer_offset(image, iview->vk.base_mip_level,
113 iview->vk.base_array_layer + layer);
114
115 cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
116 load.buffer_to_load = buffer;
117 load.address = v3dv_cl_address(image->mem->bo, layer_offset);
118
119 load.input_image_format = iview->format->rt_type;
120 load.r_b_swap = iview->swap_rb;
121 load.memory_format = slice->tiling;
122
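/* For UIF tilings this field holds the slice's padded height in UIF
 * blocks; for raster tiling it holds the stride in bytes. For other
 * (LT) tilings the field is not needed and is left at its zero default.
 */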
123 if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
124 slice->tiling == V3D_TILING_UIF_XOR) {
125 load.height_in_ub_or_stride =
126 slice->padded_height_of_output_image_in_uif_blocks;
127 } else if (slice->tiling == V3D_TILING_RASTER) {
128 load.height_in_ub_or_stride = slice->stride;
129 }
130
131 if (image->vk.samples > VK_SAMPLE_COUNT_1_BIT)
132 load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
133 else
134 load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
135 }
136 }
137
138 static bool
139 check_needs_load(const struct v3dv_cmd_buffer_state *state,
140 VkImageAspectFlags aspect,
141 uint32_t first_subpass_idx,
142 VkAttachmentLoadOp load_op)
143 {
144 /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
145 * testing does not exist in the image.
146 */
147 if (!aspect)
148 return false;
149
150 /* Attachment (or view) load operations only apply on the first subpass
151 * that uses the attachment (or view); on later subpasses we always load.
152 */
153 if (state->job->first_subpass > first_subpass_idx)
154 return true;
155
156 /* If the job is continuing a subpass started in another job, we always
157 * need to load.
158 */
159 if (state->job->is_subpass_continue)
160 return true;
161
162 /* If the area is not aligned to tile boundaries, we always need to load */
163 if (!state->tile_aligned_render_area)
164 return true;
165
166 /* The attachment load operation must be LOAD */
167 return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
168 }
169
170 static inline uint32_t
171 v3dv_zs_buffer(bool depth, bool stencil)
172 {
173 if (depth && stencil)
174 return ZSTENCIL;
175 else if (depth)
176 return Z;
177 else if (stencil)
178 return STENCIL;
179 return NONE;
180 }
181
182 static void
183 cmd_buffer_render_pass_emit_loads(struct v3dv_cmd_buffer *cmd_buffer,
184 struct v3dv_cl *cl,
185 uint32_t layer)
186 {
187 const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
188 const struct v3dv_framebuffer *framebuffer = state->framebuffer;
189 const struct v3dv_render_pass *pass = state->pass;
190 const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
191
192 assert(!pass->multiview_enabled || layer < MAX_MULTIVIEW_VIEW_COUNT);
193
194 for (uint32_t i = 0; i < subpass->color_count; i++) {
195 uint32_t attachment_idx = subpass->color_attachments[i].attachment;
196
197 if (attachment_idx == VK_ATTACHMENT_UNUSED)
198 continue;
199
200 const struct v3dv_render_pass_attachment *attachment =
201 &state->pass->attachments[attachment_idx];
202
203 /* According to the Vulkan spec:
204 *
205 * "The load operation for each sample in an attachment happens before
206 * any recorded command which accesses the sample in the first subpass
207 * where the attachment is used."
208 *
209 * If the load operation is CLEAR, we must only clear once on the first
210 * subpass that uses the attachment (and in that case we don't LOAD).
211 * After that, we always want to load so we don't lose any rendering done
212 * by a previous subpass to the same attachment. We also want to load
213 * if the current job is continuing subpass work started by a previous
214 * job, for the same reason.
215 *
216 * If the render area is not aligned to tile boundaries then we have
217 * tiles which are partially covered by it. In this case, we need to
218 * load the tiles so we can preserve the pixels that are outside the
219 * render area for any such tiles.
220 */
221 uint32_t first_subpass = !pass->multiview_enabled ?
222 attachment->first_subpass :
223 attachment->views[layer].first_subpass;
224
225 bool needs_load = check_needs_load(state,
226 VK_IMAGE_ASPECT_COLOR_BIT,
227 first_subpass,
228 attachment->desc.loadOp);
229 if (needs_load) {
230 struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
231 cmd_buffer_render_pass_emit_load(cmd_buffer, cl, iview,
232 layer, RENDER_TARGET_0 + i);
233 }
234 }
235
236 uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
237 if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
238 const struct v3dv_render_pass_attachment *ds_attachment =
239 &state->pass->attachments[ds_attachment_idx];
240
241 const VkImageAspectFlags ds_aspects =
242 vk_format_aspects(ds_attachment->desc.format);
243
244 uint32_t ds_first_subpass = !pass->multiview_enabled ?
245 ds_attachment->first_subpass :
246 ds_attachment->views[layer].first_subpass;
247
248 const bool needs_depth_load =
249 check_needs_load(state,
250 ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
251 ds_first_subpass,
252 ds_attachment->desc.loadOp);
253
254 const bool needs_stencil_load =
255 check_needs_load(state,
256 ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
257 ds_first_subpass,
258 ds_attachment->desc.stencilLoadOp);
259
260 if (needs_depth_load || needs_stencil_load) {
261 struct v3dv_image_view *iview =
262 framebuffer->attachments[ds_attachment_idx];
263 /* From the Vulkan spec:
264 *
265 * "When an image view of a depth/stencil image is used as a
266 * depth/stencil framebuffer attachment, the aspectMask is ignored
267 * and both depth and stencil image subresources are used."
268 *
269 * So we ignore the aspects from the subresource range of the image
270 * view for the depth/stencil attachment, but we still need to restrict
271 * them to the aspects compatible with the render pass and the image.
272 */
273 const uint32_t zs_buffer =
274 v3dv_zs_buffer(needs_depth_load, needs_stencil_load);
275 cmd_buffer_render_pass_emit_load(cmd_buffer, cl,
276 iview, layer, zs_buffer);
277 }
278 }
279
280 cl_emit(cl, END_OF_LOADS, end);
281 }
282
283 static void
284 cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
285 struct v3dv_cl *cl,
286 uint32_t attachment_idx,
287 uint32_t layer,
288 uint32_t buffer,
289 bool clear,
290 bool is_multisample_resolve)
291 {
292 const struct v3dv_image_view *iview =
293 cmd_buffer->state.framebuffer->attachments[attachment_idx];
294 const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
295 const struct v3d_resource_slice *slice =
296 &image->slices[iview->vk.base_mip_level];
297 uint32_t layer_offset = v3dv_layer_offset(image,
298 iview->vk.base_mip_level,
299 iview->vk.base_array_layer + layer);
300
301 cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
302 store.buffer_to_store = buffer;
303 store.address = v3dv_cl_address(image->mem->bo, layer_offset);
304 store.clear_buffer_being_stored = clear;
305
306 store.output_image_format = iview->format->rt_type;
307 store.r_b_swap = iview->swap_rb;
308 store.memory_format = slice->tiling;
309
310 if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
311 slice->tiling == V3D_TILING_UIF_XOR) {
312 store.height_in_ub_or_stride =
313 slice->padded_height_of_output_image_in_uif_blocks;
314 } else if (slice->tiling == V3D_TILING_RASTER) {
315 store.height_in_ub_or_stride = slice->stride;
316 }
317
318 if (image->vk.samples > VK_SAMPLE_COUNT_1_BIT)
319 store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
320 else if (is_multisample_resolve)
321 store.decimate_mode = V3D_DECIMATE_MODE_4X;
322 else
323 store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
324 }
325 }
326
327 static bool
328 check_needs_clear(const struct v3dv_cmd_buffer_state *state,
329 VkImageAspectFlags aspect,
330 uint32_t first_subpass_idx,
331 VkAttachmentLoadOp load_op,
332 bool do_clear_with_draw)
333 {
334 /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
335 * testing does not exist in the image.
336 */
337 if (!aspect)
338 return false;
339
340 /* If the aspect needs to be cleared with a draw call then we won't emit
341 * the clear here.
342 */
343 if (do_clear_with_draw)
344 return false;
345
346 /* If this is resuming a subpass started with another job, then attachment
347 * load operations don't apply.
348 */
349 if (state->job->is_subpass_continue)
350 return false;
351
352 /* If the render area is not aligned to tile boundaries we can't use the
353 * TLB for a clear.
354 */
355 if (!state->tile_aligned_render_area)
356 return false;
357
358 /* If this job is running in a subpass other than the first subpass in
359 * which this attachment (or view) is used then attachment load operations
360 * don't apply.
361 */
362 if (state->job->first_subpass != first_subpass_idx)
363 return false;
364
365 /* The attachment load operation must be CLEAR */
366 return load_op == VK_ATTACHMENT_LOAD_OP_CLEAR;
367 }
368
369 static bool
370 check_needs_store(const struct v3dv_cmd_buffer_state *state,
371 VkImageAspectFlags aspect,
372 uint32_t last_subpass_idx,
373 VkAttachmentStoreOp store_op)
374 {
375 /* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
376 * testing does not exist in the image.
377 */
378 if (!aspect)
379 return false;
380
381 /* Attachment (or view) store operations only apply on the last subpass
382 * where the attachment (or view) is used; in earlier subpasses we always
383 * need to store.
384 */
385 if (state->subpass_idx < last_subpass_idx)
386 return true;
387
388 /* Attachment store operations only apply on the last job we emit in the
389 * last subpass where the attachment is used, otherwise we always need to
390 * store.
391 */
392 if (!state->job->is_subpass_finish)
393 return true;
394
395 /* The attachment store operation must be STORE */
396 return store_op == VK_ATTACHMENT_STORE_OP_STORE;
397 }
398
399 static void
400 cmd_buffer_render_pass_emit_stores(struct v3dv_cmd_buffer *cmd_buffer,
401 struct v3dv_cl *cl,
402 uint32_t layer)
403 {
404 struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
405 struct v3dv_render_pass *pass = state->pass;
406 const struct v3dv_subpass *subpass =
407 &pass->subpasses[state->subpass_idx];
408
409 bool has_stores = false;
410 bool use_global_zs_clear = false;
411 bool use_global_rt_clear = false;
412
413 assert(!pass->multiview_enabled || layer < MAX_MULTIVIEW_VIEW_COUNT);
414
415 /* FIXME: separate stencil */
416 uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
417 if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
418 const struct v3dv_render_pass_attachment *ds_attachment =
419 &state->pass->attachments[ds_attachment_idx];
420
421 assert(state->job->first_subpass >= ds_attachment->first_subpass);
422 assert(state->subpass_idx >= ds_attachment->first_subpass);
423 assert(state->subpass_idx <= ds_attachment->last_subpass);
424
425 /* From the Vulkan spec, VkImageSubresourceRange:
426 *
427 * "When an image view of a depth/stencil image is used as a
428 * depth/stencil framebuffer attachment, the aspectMask is ignored
429 * and both depth and stencil image subresources are used."
430 *
431 * So we ignore the aspects from the subresource range of the image
432 * view for the depth/stencil attachment, but we still need to restrict
433 * them to the aspects compatible with the render pass and the image.
434 */
435 const VkImageAspectFlags aspects =
436 vk_format_aspects(ds_attachment->desc.format);
437
438 /* Only clear once on the first subpass that uses the attachment */
439 uint32_t ds_first_subpass = !state->pass->multiview_enabled ?
440 ds_attachment->first_subpass :
441 ds_attachment->views[layer].first_subpass;
442
443 bool needs_depth_clear =
444 check_needs_clear(state,
445 aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
446 ds_first_subpass,
447 ds_attachment->desc.loadOp,
448 subpass->do_depth_clear_with_draw);
449
450 bool needs_stencil_clear =
451 check_needs_clear(state,
452 aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
453 ds_first_subpass,
454 ds_attachment->desc.stencilLoadOp,
455 subpass->do_stencil_clear_with_draw);
456
457 /* Skip the last store if it is not required */
458 uint32_t ds_last_subpass = !pass->multiview_enabled ?
459 ds_attachment->last_subpass :
460 ds_attachment->views[layer].last_subpass;
461
462 bool needs_depth_store =
463 check_needs_store(state,
464 aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
465 ds_last_subpass,
466 ds_attachment->desc.storeOp);
467
468 bool needs_stencil_store =
469 check_needs_store(state,
470 aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
471 ds_last_subpass,
472 ds_attachment->desc.stencilStoreOp);
473
474 /* GFXH-1689: The per-buffer store command's clear buffer bit is broken
475 * for depth/stencil.
476 *
477 * There used to be some confusion regarding the Clear Tile Buffers
478 * Z/S bit also being broken, but we confirmed with Broadcom that this
479 * is not the case, it was just that some other hardware bugs (that we
480 * need to work around, such as GFXH-1461) could cause this bit to behave
481 * incorrectly.
482 *
483 * There used to be another issue where the RTs bit in the Clear Tile
484 * Buffers packet also cleared Z/S, but Broadcom confirmed this is
485 * fixed since V3D 4.1.
486 *
487 * So if we have to emit a clear of depth or stencil we don't use
488 * the per-buffer store clear bit, even if we need to store the buffers,
489 * instead we always have to use the Clear Tile Buffers Z/S bit.
490 * If we have configured the job to do early Z/S clearing, then we
491 * don't want to emit any Clear Tile Buffers command at all here.
492 *
493 * Note that GFXH-1689 is not reproduced in the simulator, where
494 * using the clear buffer bit in depth/stencil stores works fine.
495 */
496 use_global_zs_clear = !state->job->early_zs_clear &&
497 (needs_depth_clear || needs_stencil_clear);
498 if (needs_depth_store || needs_stencil_store) {
499 const uint32_t zs_buffer =
500 v3dv_zs_buffer(needs_depth_store, needs_stencil_store);
501 cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
502 ds_attachment_idx, layer,
503 zs_buffer, false, false);
504 has_stores = true;
505 }
506 }
507
508 for (uint32_t i = 0; i < subpass->color_count; i++) {
509 uint32_t attachment_idx = subpass->color_attachments[i].attachment;
510
511 if (attachment_idx == VK_ATTACHMENT_UNUSED)
512 continue;
513
514 const struct v3dv_render_pass_attachment *attachment =
515 &state->pass->attachments[attachment_idx];
516
517 assert(state->job->first_subpass >= attachment->first_subpass);
518 assert(state->subpass_idx >= attachment->first_subpass);
519 assert(state->subpass_idx <= attachment->last_subpass);
520
521 /* Only clear once on the first subpass that uses the attachment */
522 uint32_t first_subpass = !pass->multiview_enabled ?
523 attachment->first_subpass :
524 attachment->views[layer].first_subpass;
525
526 bool needs_clear =
527 check_needs_clear(state,
528 VK_IMAGE_ASPECT_COLOR_BIT,
529 first_subpass,
530 attachment->desc.loadOp,
531 false);
532
533 /* Skip the last store if it is not required */
534 uint32_t last_subpass = !pass->multiview_enabled ?
535 attachment->last_subpass :
536 attachment->views[layer].last_subpass;
537
538 bool needs_store =
539 check_needs_store(state,
540 VK_IMAGE_ASPECT_COLOR_BIT,
541 last_subpass,
542 attachment->desc.storeOp);
543
544 /* If we need to resolve this attachment emit that store first. Notice
545 * that we must not request a tile buffer clear here in that case, since
546 * that would clear the tile buffer before we get to emit the actual
547 * color attachment store below, because the clear happens after the
548 * store is completed.
549 *
550 * If the attachment doesn't support TLB resolves then we will have to
551 * fall back to doing the resolve in a shader separately after this
552 * job, so we will need to store the multisampled attachment even if that
553 * wasn't requested by the client.
554 */
555 const bool needs_resolve =
556 subpass->resolve_attachments &&
557 subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
558 if (needs_resolve && attachment->use_tlb_resolve) {
559 const uint32_t resolve_attachment_idx =
560 subpass->resolve_attachments[i].attachment;
561 cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
562 resolve_attachment_idx, layer,
563 RENDER_TARGET_0 + i,
564 false, true);
565 has_stores = true;
566 } else if (needs_resolve) {
567 needs_store = true;
568 }
569
570 /* Emit the color attachment store if needed */
571 if (needs_store) {
572 cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
573 attachment_idx, layer,
574 RENDER_TARGET_0 + i,
575 needs_clear && !use_global_rt_clear,
576 false);
577 has_stores = true;
578 } else if (needs_clear) {
579 use_global_rt_clear = true;
580 }
581 }
582
583 /* We always need to emit at least one dummy store */
584 if (!has_stores) {
585 cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
586 store.buffer_to_store = NONE;
587 }
588 }
589
590 /* If we have any depth/stencil clears we can't use the per-buffer clear
591 * bit and instead we have to emit a single clear of all tile buffers.
592 */
593 if (use_global_zs_clear || use_global_rt_clear) {
594 cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
595 clear.clear_z_stencil_buffer = use_global_zs_clear;
596 clear.clear_all_render_targets = use_global_rt_clear;
597 }
598 }
599 }
600
601 static void
602 cmd_buffer_render_pass_emit_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
603 uint32_t layer)
604 {
605 struct v3dv_job *job = cmd_buffer->state.job;
606 assert(job);
607
608 /* Emit the generic list in our indirect state -- the rcl will just
609 * have pointers into it.
610 */
611 struct v3dv_cl *cl = &job->indirect;
612 v3dv_cl_ensure_space(cl, 200, 1);
613 v3dv_return_if_oom(cmd_buffer, NULL);
614
615 struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
616
617 cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
618
619 cmd_buffer_render_pass_emit_loads(cmd_buffer, cl, layer);
620
621 /* The binner starts out writing tiles assuming that the initial mode
622 * is triangles, so make sure that's the case.
623 */
624 cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
625 fmt.primitive_type = LIST_TRIANGLES;
626 }
627
628 /* The PTB assumes this value is 0, but the HW will not set it. */
629 cl_emit(cl, SET_INSTANCEID, set) {
630 set.instance_id = 0;
631 }
632
633 cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
634
635 cmd_buffer_render_pass_emit_stores(cmd_buffer, cl, layer);
636
637 cl_emit(cl, END_OF_TILE_MARKER, end);
638
639 cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
640
641 cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
642 branch.start = tile_list_start;
643 branch.end = v3dv_cl_get_address(cl);
644 }
645 }
646
647 static void
648 cmd_buffer_emit_render_pass_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
649 uint32_t layer)
650 {
651 const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
652
653 struct v3dv_job *job = cmd_buffer->state.job;
654 struct v3dv_cl *rcl = &job->rcl;
655
656 /* If doing multicore binning, we would need to initialize each
657 * core's tile list here.
658 */
659 const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
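/* Each layer has its own set of tile lists in the tile allocation BO;
 * the per-layer offset below assumes 64 bytes per tile, matching the
 * 64B initial tile list block size configured for this job.
 */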
660 const uint32_t tile_alloc_offset =
661 64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
662 cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
663 list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
664 }
665
666 cmd_buffer_render_pass_emit_per_tile_rcl(cmd_buffer, layer);
667
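/* Convert the render area bounds from pixels to supertile units (a
 * supertile is supertile_width x supertile_height tiles) and emit
 * SUPERTILE_COORDINATES for every supertile in the inclusive range, so
 * the RCL only renders supertiles that intersect the render area.
 */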
668 uint32_t supertile_w_in_pixels =
669 tiling->tile_width * tiling->supertile_width;
670 uint32_t supertile_h_in_pixels =
671 tiling->tile_height * tiling->supertile_height;
672 const uint32_t min_x_supertile =
673 state->render_area.offset.x / supertile_w_in_pixels;
674 const uint32_t min_y_supertile =
675 state->render_area.offset.y / supertile_h_in_pixels;
676
677 uint32_t max_render_x = state->render_area.offset.x;
678 if (state->render_area.extent.width > 0)
679 max_render_x += state->render_area.extent.width - 1;
680 uint32_t max_render_y = state->render_area.offset.y;
681 if (state->render_area.extent.height > 0)
682 max_render_y += state->render_area.extent.height - 1;
683 const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
684 const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
685
686 for (int y = min_y_supertile; y <= max_y_supertile; y++) {
687 for (int x = min_x_supertile; x <= max_x_supertile; x++) {
688 cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
689 coords.column_number_in_supertiles = x;
690 coords.row_number_in_supertiles = y;
691 }
692 }
693 }
694 }
695
696 static void
697 set_rcl_early_z_config(struct v3dv_job *job,
698 bool *early_z_disable,
699 uint32_t *early_z_test_and_update_direction)
700 {
701 /* If this is true then we have not emitted any draw calls in this job
702 * and we don't get any benefit from early Z.
703 */
704 if (!job->decided_global_ez_enable) {
705 assert(job->draw_count == 0);
706 *early_z_disable = true;
707 return;
708 }
709
710 switch (job->first_ez_state) {
711 case V3D_EZ_UNDECIDED:
712 case V3D_EZ_LT_LE:
713 *early_z_disable = false;
714 *early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
715 break;
716 case V3D_EZ_GT_GE:
717 *early_z_disable = false;
718 *early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
719 break;
720 case V3D_EZ_DISABLED:
721 *early_z_disable = true;
722 break;
723 }
724 }
725
726 void
727 v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
728 {
729 struct v3dv_job *job = cmd_buffer->state.job;
730 assert(job);
731
732 const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
733 const struct v3dv_framebuffer *framebuffer = state->framebuffer;
734
735 /* We can't emit the RCL until we have a framebuffer, which we may not have
736 * if we are recording a secondary command buffer. In that case, we will
737 * have to wait until vkCmdExecuteCommands is called from a primary command
738 * buffer.
739 */
740 if (!framebuffer) {
741 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
742 return;
743 }
744
745 const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
746
747 const uint32_t fb_layers = job->frame_tiling.layers;
748
749 v3dv_cl_ensure_space_with_branch(&job->rcl, 200 +
750 MAX2(fb_layers, 1) * 256 *
751 cl_packet_length(SUPERTILE_COORDINATES));
752 v3dv_return_if_oom(cmd_buffer, NULL);
753
754 assert(state->subpass_idx < state->pass->subpass_count);
755 const struct v3dv_render_pass *pass = state->pass;
756 const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
757 struct v3dv_cl *rcl = &job->rcl;
758
759 /* Common config must be the first TILE_RENDERING_MODE_CFG and
760 * Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
761 * updates to the previous HW state.
762 */
763 bool do_early_zs_clear = false;
764 const uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
765 cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
766 config.image_width_pixels = framebuffer->width;
767 config.image_height_pixels = framebuffer->height;
768 config.number_of_render_targets = MAX2(subpass->color_count, 1);
769 config.multisample_mode_4x = tiling->msaa;
770 config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
771
772 if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
773 const struct v3dv_image_view *iview =
774 framebuffer->attachments[ds_attachment_idx];
775 config.internal_depth_type = iview->internal_type;
776
777 set_rcl_early_z_config(job,
778 &config.early_z_disable,
779 &config.early_z_test_and_update_direction);
780
781 /* Early-Z/S clear can be enabled if the job is clearing and not
782 * storing (or loading) depth. If a stencil aspect is also present
783 * we have the same requirements for it; however, in this case we
784 * can accept stencil loadOp DONT_CARE as well, so instead of
785 * checking that stencil is cleared we check that it is not loaded.
786 *
787 * Early-Z/S clearing is independent of Early Z/S testing, so it is
788 * possible to enable one but not the other so long as their
789 * respective requirements are met.
790 */
791 struct v3dv_render_pass_attachment *ds_attachment =
792 &pass->attachments[ds_attachment_idx];
793
794 const VkImageAspectFlags ds_aspects =
795 vk_format_aspects(ds_attachment->desc.format);
796
797 bool needs_depth_clear =
798 check_needs_clear(state,
799 ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
800 ds_attachment->first_subpass,
801 ds_attachment->desc.loadOp,
802 subpass->do_depth_clear_with_draw);
803
804 bool needs_depth_store =
805 check_needs_store(state,
806 ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
807 ds_attachment->last_subpass,
808 ds_attachment->desc.storeOp);
809
810 do_early_zs_clear = needs_depth_clear && !needs_depth_store;
811 if (do_early_zs_clear &&
812 vk_format_has_stencil(ds_attachment->desc.format)) {
813 bool needs_stencil_load =
814 check_needs_load(state,
815 ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
816 ds_attachment->first_subpass,
817 ds_attachment->desc.stencilLoadOp);
818
819 bool needs_stencil_store =
820 check_needs_store(state,
821 ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
822 ds_attachment->last_subpass,
823 ds_attachment->desc.stencilStoreOp);
824
825 do_early_zs_clear = !needs_stencil_load && !needs_stencil_store;
826 }
827
828 config.early_depth_stencil_clear = do_early_zs_clear;
829 } else {
830 config.early_z_disable = true;
831 }
832 }
833
834 /* If we enabled early Z/S clear, then we can't emit any "Clear Tile Buffers"
835 * commands with the Z/S bit set, so keep track of whether we enabled this
836 * in the job so we can skip these later.
837 */
838 job->early_zs_clear = do_early_zs_clear;
839
840 for (uint32_t i = 0; i < subpass->color_count; i++) {
841 uint32_t attachment_idx = subpass->color_attachments[i].attachment;
842 if (attachment_idx == VK_ATTACHMENT_UNUSED)
843 continue;
844
845 struct v3dv_image_view *iview =
846 state->framebuffer->attachments[attachment_idx];
847
848 const struct v3dv_image *image = (struct v3dv_image *) iview->vk.image;
849 const struct v3d_resource_slice *slice =
850 &image->slices[iview->vk.base_mip_level];
851
852 const uint32_t *clear_color =
853 &state->attachments[attachment_idx].clear_value.color[0];
854
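/* For UIF-tiled render targets we may need to pass an explicit padded
 * height in the PART3 clear packet below: we do so when the slice's
 * padded height exceeds the implicit padding derived from the frame
 * height by 15 or more UIF blocks.
 */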
855 uint32_t clear_pad = 0;
856 if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
857 slice->tiling == V3D_TILING_UIF_XOR) {
858 int uif_block_height = v3d_utile_height(image->cpp) * 2;
859
860 uint32_t implicit_padded_height =
861 align(framebuffer->height, uif_block_height) / uif_block_height;
862
863 if (slice->padded_height_of_output_image_in_uif_blocks -
864 implicit_padded_height >= 15) {
865 clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
866 }
867 }
868
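/* The clear color can be up to 128 bits and is split across up to three
 * packets: PART1 carries bits 0..55, PART2 (only needed for 64/128-bit
 * internal formats) bits 56..111, and PART3 bits 112..127 along with the
 * UIF padding computed above.
 */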
869 cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
870 clear.clear_color_low_32_bits = clear_color[0];
871 clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
872 clear.render_target_number = i;
873 };
874
875 if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
876 cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
877 clear.clear_color_mid_low_32_bits =
878 ((clear_color[1] >> 24) | (clear_color[2] << 8));
879 clear.clear_color_mid_high_24_bits =
880 ((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
881 clear.render_target_number = i;
882 };
883 }
884
885 if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
886 cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
887 clear.uif_padded_height_in_uif_blocks = clear_pad;
888 clear.clear_color_high_16_bits = clear_color[3] >> 16;
889 clear.render_target_number = i;
890 };
891 }
892 }
893
894 cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
895 v3dX(cmd_buffer_render_pass_setup_render_target)
896 (cmd_buffer, 0, &rt.render_target_0_internal_bpp,
897 &rt.render_target_0_internal_type, &rt.render_target_0_clamp);
898 v3dX(cmd_buffer_render_pass_setup_render_target)
899 (cmd_buffer, 1, &rt.render_target_1_internal_bpp,
900 &rt.render_target_1_internal_type, &rt.render_target_1_clamp);
901 v3dX(cmd_buffer_render_pass_setup_render_target)
902 (cmd_buffer, 2, &rt.render_target_2_internal_bpp,
903 &rt.render_target_2_internal_type, &rt.render_target_2_clamp);
904 v3dX(cmd_buffer_render_pass_setup_render_target)
905 (cmd_buffer, 3, &rt.render_target_3_internal_bpp,
906 &rt.render_target_3_internal_type, &rt.render_target_3_clamp);
907 }
908
909 /* Ends rendering mode config. */
910 if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
911 cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
912 clear.z_clear_value =
913 state->attachments[ds_attachment_idx].clear_value.z;
914 clear.stencil_clear_value =
915 state->attachments[ds_attachment_idx].clear_value.s;
916 };
917 } else {
918 cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
919 clear.z_clear_value = 1.0f;
920 clear.stencil_clear_value = 0;
921 };
922 }
923
924 /* Always set initial block size before the first branch, which needs
925 * to match the value from binning mode config.
926 */
927 cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
928 init.use_auto_chained_tile_lists = true;
929 init.size_of_first_block_in_chained_tile_lists =
930 TILE_ALLOCATION_BLOCK_SIZE_64B;
931 }
932
933 cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
934 config.number_of_bin_tile_lists = 1;
935 config.total_frame_width_in_tiles = tiling->draw_tiles_x;
936 config.total_frame_height_in_tiles = tiling->draw_tiles_y;
937
938 config.supertile_width_in_tiles = tiling->supertile_width;
939 config.supertile_height_in_tiles = tiling->supertile_height;
940
941 config.total_frame_width_in_supertiles =
942 tiling->frame_width_in_supertiles;
943 config.total_frame_height_in_supertiles =
944 tiling->frame_height_in_supertiles;
945 }
946
947 /* Start by clearing the tile buffer. */
948 cl_emit(rcl, TILE_COORDINATES, coords) {
949 coords.tile_column_number = 0;
950 coords.tile_row_number = 0;
951 }
952
953 /* Emit an initial clear of the tile buffers. This is necessary
954 * for any buffers that should be cleared (since clearing
955 * normally happens at the *end* of the generic tile list), but
956 * it's also nice to clear everything so the first tile doesn't
957 * inherit any contents from some previous frame.
958 *
959 * Also, implement the GFXH-1742 workaround. There's a race in
960 * the HW between the RCL updating the TLB's internal type/size
961 * and the spawning of the QPU instances using the TLB's current
962 * internal type/size. To make sure the QPUs get the right
963 * state, we need 1 dummy store in between internal type/size
964 * changes on V3D 3.x, and 2 dummy stores on 4.x.
965 */
966 for (int i = 0; i < 2; i++) {
967 if (i > 0)
968 cl_emit(rcl, TILE_COORDINATES, coords);
969 cl_emit(rcl, END_OF_LOADS, end);
970 cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
971 store.buffer_to_store = NONE;
972 }
973 if (i == 0 && cmd_buffer->state.tile_aligned_render_area) {
974 cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
975 clear.clear_z_stencil_buffer = !job->early_zs_clear;
976 clear.clear_all_render_targets = true;
977 }
978 }
979 cl_emit(rcl, END_OF_TILE_MARKER, end);
980 }
981
982 cl_emit(rcl, FLUSH_VCD_CACHE, flush);
983
984 for (int layer = 0; layer < MAX2(1, fb_layers); layer++) {
985 if (subpass->view_mask == 0 || (subpass->view_mask & (1u << layer)))
986 cmd_buffer_emit_render_pass_layer_rcl(cmd_buffer, layer);
987 }
988
989 cl_emit(rcl, END_OF_RENDERING, end);
990 }
991
992 void
993 v3dX(cmd_buffer_emit_viewport)(struct v3dv_cmd_buffer *cmd_buffer)
994 {
995 struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
996 /* FIXME: right now we only support one viewport, so viewports[0] works
997 * for now; this would need to change if we allow multiple viewports.
998 */
999 float *vptranslate = dynamic->viewport.translate[0];
1000 float *vpscale = dynamic->viewport.scale[0];
1001
1002 struct v3dv_job *job = cmd_buffer->state.job;
1003 assert(job);
1004
1005 const uint32_t required_cl_size =
1006 cl_packet_length(CLIPPER_XY_SCALING) +
1007 cl_packet_length(CLIPPER_Z_SCALE_AND_OFFSET) +
1008 cl_packet_length(CLIPPER_Z_MIN_MAX_CLIPPING_PLANES) +
1009 cl_packet_length(VIEWPORT_OFFSET);
1010 v3dv_cl_ensure_space_with_branch(&job->bcl, required_cl_size);
1011 v3dv_return_if_oom(cmd_buffer, NULL);
1012
1013 cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
1014 clip.viewport_half_width_in_1_256th_of_pixel = vpscale[0] * 256.0f;
1015 clip.viewport_half_height_in_1_256th_of_pixel = vpscale[1] * 256.0f;
1016 }
1017
1018 cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
1019 clip.viewport_z_offset_zc_to_zs = vptranslate[2];
1020 clip.viewport_z_scale_zc_to_zs = vpscale[2];
1021 }
1022 cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
1023 /* Vulkan's Z NDC is [0..1], unlike OpenGL which is [-1, 1] */
1024 float z1 = vptranslate[2];
1025 float z2 = vptranslate[2] + vpscale[2];
1026 clip.minimum_zw = MIN2(z1, z2);
1027 clip.maximum_zw = MAX2(z1, z2);
1028 }
1029
1030 cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
1031 vp.viewport_centre_x_coordinate = vptranslate[0];
1032 vp.viewport_centre_y_coordinate = vptranslate[1];
1033 }
1034
1035 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEWPORT;
1036 }
1037
1038 void
1039 v3dX(cmd_buffer_emit_stencil)(struct v3dv_cmd_buffer *cmd_buffer)
1040 {
1041 struct v3dv_job *job = cmd_buffer->state.job;
1042 assert(job);
1043
1044 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1045 struct v3dv_dynamic_state *dynamic_state = &cmd_buffer->state.dynamic;
1046
1047 const uint32_t dynamic_stencil_states = V3DV_DYNAMIC_STENCIL_COMPARE_MASK |
1048 V3DV_DYNAMIC_STENCIL_WRITE_MASK |
1049 V3DV_DYNAMIC_STENCIL_REFERENCE;
1050
1051 v3dv_cl_ensure_space_with_branch(&job->bcl,
1052 2 * cl_packet_length(STENCIL_CFG));
1053 v3dv_return_if_oom(cmd_buffer, NULL);
1054
1055 bool emitted_stencil = false;
1056 for (uint32_t i = 0; i < 2; i++) {
1057 if (pipeline->emit_stencil_cfg[i]) {
1058 if (dynamic_state->mask & dynamic_stencil_states) {
1059 cl_emit_with_prepacked(&job->bcl, STENCIL_CFG,
1060 pipeline->stencil_cfg[i], config) {
1061 if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK) {
1062 config.stencil_test_mask =
1063 i == 0 ? dynamic_state->stencil_compare_mask.front :
1064 dynamic_state->stencil_compare_mask.back;
1065 }
1066 if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK) {
1067 config.stencil_write_mask =
1068 i == 0 ? dynamic_state->stencil_write_mask.front :
1069 dynamic_state->stencil_write_mask.back;
1070 }
1071 if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_REFERENCE) {
1072 config.stencil_ref_value =
1073 i == 0 ? dynamic_state->stencil_reference.front :
1074 dynamic_state->stencil_reference.back;
1075 }
1076 }
1077 } else {
1078 cl_emit_prepacked(&job->bcl, &pipeline->stencil_cfg[i]);
1079 }
1080
1081 emitted_stencil = true;
1082 }
1083 }
1084
1085 if (emitted_stencil) {
1086 const uint32_t dynamic_stencil_dirty_flags =
1087 V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
1088 V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
1089 V3DV_CMD_DIRTY_STENCIL_REFERENCE;
1090 cmd_buffer->state.dirty &= ~dynamic_stencil_dirty_flags;
1091 }
1092 }
1093
1094 void
1095 v3dX(cmd_buffer_emit_depth_bias)(struct v3dv_cmd_buffer *cmd_buffer)
1096 {
1097 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1098 assert(pipeline);
1099
1100 if (!pipeline->depth_bias.enabled)
1101 return;
1102
1103 struct v3dv_job *job = cmd_buffer->state.job;
1104 assert(job);
1105
1106 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(DEPTH_OFFSET));
1107 v3dv_return_if_oom(cmd_buffer, NULL);
1108
1109 struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1110 cl_emit(&job->bcl, DEPTH_OFFSET, bias) {
1111 bias.depth_offset_factor = dynamic->depth_bias.slope_factor;
1112 bias.depth_offset_units = dynamic->depth_bias.constant_factor;
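/* Scale up the offset units for 16-bit depth formats, presumably because
 * the HW expresses them in 24-bit depth precision (assumption based on
 * this Z16-specific adjustment).
 */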
1113 if (pipeline->depth_bias.is_z16)
1114 bias.depth_offset_units *= 256.0f;
1115 bias.limit = dynamic->depth_bias.depth_bias_clamp;
1116 }
1117
1118 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_DEPTH_BIAS;
1119 }
1120
1121 void
1122 v3dX(cmd_buffer_emit_line_width)(struct v3dv_cmd_buffer *cmd_buffer)
1123 {
1124 struct v3dv_job *job = cmd_buffer->state.job;
1125 assert(job);
1126
1127 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(LINE_WIDTH));
1128 v3dv_return_if_oom(cmd_buffer, NULL);
1129
1130 cl_emit(&job->bcl, LINE_WIDTH, line) {
1131 line.line_width = cmd_buffer->state.dynamic.line_width;
1132 }
1133
1134 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_LINE_WIDTH;
1135 }
1136
1137 void
1138 v3dX(cmd_buffer_emit_sample_state)(struct v3dv_cmd_buffer *cmd_buffer)
1139 {
1140 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1141 assert(pipeline);
1142
1143 struct v3dv_job *job = cmd_buffer->state.job;
1144 assert(job);
1145
1146 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(SAMPLE_STATE));
1147 v3dv_return_if_oom(cmd_buffer, NULL);
1148
1149 cl_emit(&job->bcl, SAMPLE_STATE, state) {
1150 state.coverage = 1.0f;
1151 state.mask = pipeline->sample_mask;
1152 }
1153 }
1154
1155 void
1156 v3dX(cmd_buffer_emit_blend)(struct v3dv_cmd_buffer *cmd_buffer)
1157 {
1158 struct v3dv_job *job = cmd_buffer->state.job;
1159 assert(job);
1160
1161 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1162 assert(pipeline);
1163
1164 const uint32_t blend_packets_size =
1165 cl_packet_length(BLEND_ENABLES) +
1166 cl_packet_length(BLEND_CONSTANT_COLOR) +
1167 cl_packet_length(BLEND_CFG) * V3D_MAX_DRAW_BUFFERS;
1168
1169 v3dv_cl_ensure_space_with_branch(&job->bcl, blend_packets_size);
1170 v3dv_return_if_oom(cmd_buffer, NULL);
1171
1172 if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
1173 if (pipeline->blend.enables) {
1174 cl_emit(&job->bcl, BLEND_ENABLES, enables) {
1175 enables.mask = pipeline->blend.enables;
1176 }
1177 }
1178
1179 for (uint32_t i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
1180 if (pipeline->blend.enables & (1 << i))
1181 cl_emit_prepacked(&job->bcl, &pipeline->blend.cfg[i]);
1182 }
1183 }
1184
1185 if (pipeline->blend.needs_color_constants &&
1186 cmd_buffer->state.dirty & V3DV_CMD_DIRTY_BLEND_CONSTANTS) {
1187 struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1188 cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
1189 color.red_f16 = _mesa_float_to_half(dynamic->blend_constants[0]);
1190 color.green_f16 = _mesa_float_to_half(dynamic->blend_constants[1]);
1191 color.blue_f16 = _mesa_float_to_half(dynamic->blend_constants[2]);
1192 color.alpha_f16 = _mesa_float_to_half(dynamic->blend_constants[3]);
1193 }
1194 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_BLEND_CONSTANTS;
1195 }
1196 }
1197
1198 void
1199 v3dX(cmd_buffer_emit_color_write_mask)(struct v3dv_cmd_buffer *cmd_buffer)
1200 {
1201 struct v3dv_job *job = cmd_buffer->state.job;
1202 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(COLOR_WRITE_MASKS));
1203
1204 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1205 struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
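/* The COLOR_WRITE_MASKS packet takes per-channel disable bits, so we
 * combine the pipeline's static write masks (already stored as disable
 * bits) with the inverse of the dynamic color write enables.
 */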
1206 cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
1207 mask.mask = (~dynamic->color_write_enable |
1208 pipeline->blend.color_write_masks) & 0xffff;
1209 }
1210
1211 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE;
1212 }
1213
1214 static void
1215 emit_flat_shade_flags(struct v3dv_job *job,
1216 int varying_offset,
1217 uint32_t varyings,
1218 enum V3DX(Varying_Flags_Action) lower,
1219 enum V3DX(Varying_Flags_Action) higher)
1220 {
1221 v3dv_cl_ensure_space_with_branch(&job->bcl,
1222 cl_packet_length(FLAT_SHADE_FLAGS));
1223 v3dv_return_if_oom(NULL, job);
1224
1225 cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
1226 flags.varying_offset_v0 = varying_offset;
1227 flags.flat_shade_flags_for_varyings_v024 = varyings;
1228 flags.action_for_flat_shade_flags_of_lower_numbered_varyings = lower;
1229 flags.action_for_flat_shade_flags_of_higher_numbered_varyings = higher;
1230 }
1231 }
1232
1233 static void
1234 emit_noperspective_flags(struct v3dv_job *job,
1235 int varying_offset,
1236 uint32_t varyings,
1237 enum V3DX(Varying_Flags_Action) lower,
1238 enum V3DX(Varying_Flags_Action) higher)
1239 {
1240 v3dv_cl_ensure_space_with_branch(&job->bcl,
1241 cl_packet_length(NON_PERSPECTIVE_FLAGS));
1242 v3dv_return_if_oom(NULL, job);
1243
1244 cl_emit(&job->bcl, NON_PERSPECTIVE_FLAGS, flags) {
1245 flags.varying_offset_v0 = varying_offset;
1246 flags.non_perspective_flags_for_varyings_v024 = varyings;
1247 flags.action_for_non_perspective_flags_of_lower_numbered_varyings = lower;
1248 flags.action_for_non_perspective_flags_of_higher_numbered_varyings = higher;
1249 }
1250 }
1251
1252 static void
1253 emit_centroid_flags(struct v3dv_job *job,
1254 int varying_offset,
1255 uint32_t varyings,
1256 enum V3DX(Varying_Flags_Action) lower,
1257 enum V3DX(Varying_Flags_Action) higher)
1258 {
1259 v3dv_cl_ensure_space_with_branch(&job->bcl,
1260 cl_packet_length(CENTROID_FLAGS));
1261 v3dv_return_if_oom(NULL, job);
1262
1263 cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
1264 flags.varying_offset_v0 = varying_offset;
1265 flags.centroid_flags_for_varyings_v024 = varyings;
1266 flags.action_for_centroid_flags_of_lower_numbered_varyings = lower;
1267 flags.action_for_centroid_flags_of_higher_numbered_varyings = higher;
1268 }
1269 }
1270
1271 static bool
1272 emit_varying_flags(struct v3dv_job *job,
1273 uint32_t num_flags,
1274 const uint32_t *flags,
1275 void (*flag_emit_callback)(struct v3dv_job *job,
1276 int varying_offset,
1277 uint32_t flags,
1278 enum V3DX(Varying_Flags_Action) lower,
1279 enum V3DX(Varying_Flags_Action) higher))
1280 {
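/* Emit one packet per group of 32 varyings that has any flags set. The
 * first packet emitted zeroes the flags of all groups it doesn't cover,
 * while later packets leave all other groups unchanged (they were
 * already zeroed by the first packet if needed).
 */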
1281 bool emitted_any = false;
1282 for (int i = 0; i < num_flags; i++) {
1283 if (!flags[i])
1284 continue;
1285
1286 if (emitted_any) {
1287 flag_emit_callback(job, i, flags[i],
1288 V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1289 V3D_VARYING_FLAGS_ACTION_UNCHANGED);
1290 } else if (i == 0) {
1291 flag_emit_callback(job, i, flags[i],
1292 V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1293 V3D_VARYING_FLAGS_ACTION_ZEROED);
1294 } else {
1295 flag_emit_callback(job, i, flags[i],
1296 V3D_VARYING_FLAGS_ACTION_ZEROED,
1297 V3D_VARYING_FLAGS_ACTION_ZEROED);
1298 }
1299
1300 emitted_any = true;
1301 }
1302
1303 return emitted_any;
1304 }
1305
1306 void
1307 v3dX(cmd_buffer_emit_varyings_state)(struct v3dv_cmd_buffer *cmd_buffer)
1308 {
1309 struct v3dv_job *job = cmd_buffer->state.job;
1310 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1311
1312 struct v3d_fs_prog_data *prog_data_fs =
1313 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
1314
1315 const uint32_t num_flags =
1316 ARRAY_SIZE(prog_data_fs->flat_shade_flags);
1317 const uint32_t *flat_shade_flags = prog_data_fs->flat_shade_flags;
1318 const uint32_t *noperspective_flags = prog_data_fs->noperspective_flags;
1319 const uint32_t *centroid_flags = prog_data_fs->centroid_flags;
1320
1321 if (!emit_varying_flags(job, num_flags, flat_shade_flags,
1322 emit_flat_shade_flags)) {
1323 v3dv_cl_ensure_space_with_branch(
1324 &job->bcl, cl_packet_length(ZERO_ALL_FLAT_SHADE_FLAGS));
1325 v3dv_return_if_oom(cmd_buffer, NULL);
1326
1327 cl_emit(&job->bcl, ZERO_ALL_FLAT_SHADE_FLAGS, flags);
1328 }
1329
1330 if (!emit_varying_flags(job, num_flags, noperspective_flags,
1331 emit_noperspective_flags)) {
1332 v3dv_cl_ensure_space_with_branch(
1333 &job->bcl, cl_packet_length(ZERO_ALL_NON_PERSPECTIVE_FLAGS));
1334 v3dv_return_if_oom(cmd_buffer, NULL);
1335
1336 cl_emit(&job->bcl, ZERO_ALL_NON_PERSPECTIVE_FLAGS, flags);
1337 }
1338
1339 if (!emit_varying_flags(job, num_flags, centroid_flags,
1340 emit_centroid_flags)) {
1341 v3dv_cl_ensure_space_with_branch(
1342 &job->bcl, cl_packet_length(ZERO_ALL_CENTROID_FLAGS));
1343 v3dv_return_if_oom(cmd_buffer, NULL);
1344
1345 cl_emit(&job->bcl, ZERO_ALL_CENTROID_FLAGS, flags);
1346 }
1347 }
1348
1349 static void
1350 job_update_ez_state(struct v3dv_job *job,
1351 struct v3dv_pipeline *pipeline,
1352 struct v3dv_cmd_buffer *cmd_buffer)
1353 {
1354 /* If first_ez_state is V3D_EZ_DISABLED it means that we have already
1355 * determined that we should disable EZ completely for all draw calls in
1356 * this job. This will cause us to disable EZ for the entire job in the
1357 * Tile Rendering Mode RCL packet, and when we do that we need to make sure
1358 * we never emit a draw call in the job with EZ enabled in the CFG_BITS
1359 * packet, so ez_state must also be V3D_EZ_DISABLED.
1360 */
1361 if (job->first_ez_state == V3D_EZ_DISABLED) {
1362 assert(job->ez_state == V3D_EZ_DISABLED);
1363 return;
1364 }
1365
1366 /* This is part of the pre draw call handling, so we should be inside a
1367 * render pass.
1368 */
1369 assert(cmd_buffer->state.pass);
1370
1371 /* If this is the first time we update EZ state for this job we first check
1372 * if there is anything that requires disabling it completely for the entire
1373 * job (based on state that is not related to the current draw call and
1374 * pipeline state).
1375 */
1376 if (!job->decided_global_ez_enable) {
1377 job->decided_global_ez_enable = true;
1378
1379 struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1380 assert(state->subpass_idx < state->pass->subpass_count);
1381 struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
1382 if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
1383 job->first_ez_state = V3D_EZ_DISABLED;
1384 job->ez_state = V3D_EZ_DISABLED;
1385 return;
1386 }
1387
1388 /* GFXH-1918: the early-z buffer may load incorrect depth values
1389 * if the frame has odd width or height.
1390 *
1391 * So we need to disable EZ in this case.
1392 */
1393 const struct v3dv_render_pass_attachment *ds_attachment =
1394 &state->pass->attachments[subpass->ds_attachment.attachment];
1395
1396 const VkImageAspectFlags ds_aspects =
1397 vk_format_aspects(ds_attachment->desc.format);
1398
1399 bool needs_depth_load =
1400 check_needs_load(state,
1401 ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
1402 ds_attachment->first_subpass,
1403 ds_attachment->desc.loadOp);
1404
1405 if (needs_depth_load) {
1406 struct v3dv_framebuffer *fb = state->framebuffer;
1407
1408 if (!fb) {
1409 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1410 perf_debug("Loading depth aspect in a secondary command buffer "
1411 "without framebuffer info disables early-z tests.\n");
1412 job->first_ez_state = V3D_EZ_DISABLED;
1413 job->ez_state = V3D_EZ_DISABLED;
1414 return;
1415 }
1416
1417 if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
1418 perf_debug("Loading depth aspect for framebuffer with odd width "
1419 "or height disables early-Z tests.\n");
1420 job->first_ez_state = V3D_EZ_DISABLED;
1421 job->ez_state = V3D_EZ_DISABLED;
1422 return;
1423 }
1424 }
1425 }
1426
1427 /* Otherwise, we can decide to selectively enable or disable EZ for draw
1428 * calls using the CFG_BITS packet based on the bound pipeline state.
1429 */
1430
1431 /* If the FS writes Z, then it may update against the chosen EZ direction */
1432 struct v3dv_shader_variant *fs_variant =
1433 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1434 if (fs_variant->prog_data.fs->writes_z) {
1435 job->ez_state = V3D_EZ_DISABLED;
1436 return;
1437 }
1438
1439 switch (pipeline->ez_state) {
1440 case V3D_EZ_UNDECIDED:
1441 /* If the pipeline didn't pick a direction but didn't disable, then go
1442 * along with the current EZ state. This allows EZ optimization for Z
1443 * func == EQUAL or NEVER.
1444 */
1445 break;
1446
1447 case V3D_EZ_LT_LE:
1448 case V3D_EZ_GT_GE:
1449 /* If the pipeline picked a direction, then it needs to match the current
1450 * direction if we've decided on one.
1451 */
1452 if (job->ez_state == V3D_EZ_UNDECIDED)
1453 job->ez_state = pipeline->ez_state;
1454 else if (job->ez_state != pipeline->ez_state)
1455 job->ez_state = V3D_EZ_DISABLED;
1456 break;
1457
1458 case V3D_EZ_DISABLED:
1459 /* If the pipeline disables EZ because of a bad Z func or stencil
1460 * operation, then we can't do any more EZ in this frame.
1461 */
1462 job->ez_state = V3D_EZ_DISABLED;
1463 break;
1464 }
1465
1466 if (job->first_ez_state == V3D_EZ_UNDECIDED &&
1467 job->ez_state != V3D_EZ_DISABLED) {
1468 job->first_ez_state = job->ez_state;
1469 }
1470 }
1471
1472 void
1473 v3dX(cmd_buffer_emit_configuration_bits)(struct v3dv_cmd_buffer *cmd_buffer)
1474 {
1475 struct v3dv_job *job = cmd_buffer->state.job;
1476 assert(job);
1477
1478 struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1479 assert(pipeline);
1480
1481 job_update_ez_state(job, pipeline, cmd_buffer);
1482
1483 v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CFG_BITS));
1484 v3dv_return_if_oom(cmd_buffer, NULL);
1485
1486 cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
1487 config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
1488 config.early_z_updates_enable = config.early_z_enable &&
1489 pipeline->z_updates_enable;
1490 }
1491 }
1492
1493 void
1494 v3dX(cmd_buffer_emit_occlusion_query)(struct v3dv_cmd_buffer *cmd_buffer)
1495 {
1496 struct v3dv_job *job = cmd_buffer->state.job;
1497 assert(job);
1498
1499 v3dv_cl_ensure_space_with_branch(&job->bcl,
1500 cl_packet_length(OCCLUSION_QUERY_COUNTER));
1501 v3dv_return_if_oom(cmd_buffer, NULL);
1502
1503 cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
1504 if (cmd_buffer->state.query.active_query.bo) {
1505 counter.address =
1506 v3dv_cl_address(cmd_buffer->state.query.active_query.bo,
1507 cmd_buffer->state.query.active_query.offset);
1508 }
1509 }
1510
1511 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1512 }
1513
1514 static struct v3dv_job *
1515 cmd_buffer_subpass_split_for_barrier(struct v3dv_cmd_buffer *cmd_buffer,
1516 bool is_bcl_barrier)
1517 {
1518 assert(cmd_buffer->state.subpass_idx != -1);
1519 v3dv_cmd_buffer_finish_job(cmd_buffer);
1520 struct v3dv_job *job =
1521 v3dv_cmd_buffer_subpass_resume(cmd_buffer,
1522 cmd_buffer->state.subpass_idx);
1523 if (!job)
1524 return NULL;
1525
1526 job->serialize = true;
1527 job->needs_bcl_sync = is_bcl_barrier;
1528 return job;
1529 }
1530
1531 static void
1532 cmd_buffer_copy_secondary_end_query_state(struct v3dv_cmd_buffer *primary,
1533 struct v3dv_cmd_buffer *secondary)
1534 {
1535 struct v3dv_cmd_buffer_state *p_state = &primary->state;
1536 struct v3dv_cmd_buffer_state *s_state = &secondary->state;
1537
1538 const uint32_t total_state_count =
1539 p_state->query.end.used_count + s_state->query.end.used_count;
1540 v3dv_cmd_buffer_ensure_array_state(primary,
1541 sizeof(struct v3dv_end_query_cpu_job_info),
1542 total_state_count,
1543 &p_state->query.end.alloc_count,
1544 (void **) &p_state->query.end.states);
1545 v3dv_return_if_oom(primary, NULL);
1546
1547 for (uint32_t i = 0; i < s_state->query.end.used_count; i++) {
1548 const struct v3dv_end_query_cpu_job_info *s_qstate =
1549 &secondary->state.query.end.states[i];
1550
1551 struct v3dv_end_query_cpu_job_info *p_qstate =
1552 &p_state->query.end.states[p_state->query.end.used_count++];
1553
1554 p_qstate->pool = s_qstate->pool;
1555 p_qstate->query = s_qstate->query;
1556 }
1557 }
1558
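/* Executes secondary command buffers recorded inside a render pass:
 * GPU_CL_SECONDARY jobs are branched to from the primary's BCL, any other
 * jobs are cloned into the primary's job list, and barrier state from the
 * secondaries is propagated to whatever comes next in the primary.
 */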
1559 void
1560 v3dX(cmd_buffer_execute_inside_pass)(struct v3dv_cmd_buffer *primary,
1561 uint32_t cmd_buffer_count,
1562 const VkCommandBuffer *cmd_buffers)
1563 {
1564 assert(primary->state.job);
1565
1566 /* Emit occlusion query state if needed so the draw calls inside our
1567 * secondaries update the counters.
1568 */
1569 bool has_occlusion_query =
1570 primary->state.dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1571 if (has_occlusion_query)
1572 v3dX(cmd_buffer_emit_occlusion_query)(primary);
1573
1574 /* FIXME: if our primary job tiling doesn't enable MSAA but any of the
1575 * pipelines used by the secondaries do, we need to re-start the primary
1576 * job to enable MSAA. See cmd_buffer_restart_job_for_msaa_if_needed.
1577 */
1578 bool pending_barrier = false;
1579 bool pending_bcl_barrier = false;
1580 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1581 V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
1582
1583 assert(secondary->usage_flags &
1584 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
1585
1586 list_for_each_entry(struct v3dv_job, secondary_job,
1587 &secondary->jobs, list_link) {
1588 if (secondary_job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
1589 /* If the job is a CL, then we branch to it from the primary BCL.
1590 * In this case the secondary's BCL is finished with a
1591 * RETURN_FROM_SUB_LIST command to return to the primary BCL
1592 * once we are done executing it.
1593 */
1594 assert(v3dv_cl_offset(&secondary_job->rcl) == 0);
1595 assert(secondary_job->bcl.bo);
1596
1597 /* Sanity check that secondary BCL ends with RETURN_FROM_SUB_LIST */
1598 STATIC_ASSERT(cl_packet_length(RETURN_FROM_SUB_LIST) == 1);
1599 assert(v3dv_cl_offset(&secondary_job->bcl) >= 1);
1600 assert(*(((uint8_t *)secondary_job->bcl.next) - 1) ==
1601 V3DX(RETURN_FROM_SUB_LIST_opcode));
1602
1603 /* If this secondary has any barriers (or we had any pending barrier
1604 * to apply), then we can't just branch to it from the primary; we
1605 * need to split the primary to create a new job that can consume
1606 * the barriers first.
1607 *
1608 * FIXME: in this case, maybe just copy the secondary BCL without
1609 * the RETURN_FROM_SUB_LIST into the primary job to skip the
1610 * branch?
1611 */
1612 struct v3dv_job *primary_job = primary->state.job;
1613 if (!primary_job || secondary_job->serialize || pending_barrier) {
1614 const bool needs_bcl_barrier =
1615 secondary_job->needs_bcl_sync || pending_bcl_barrier;
1616 primary_job =
1617 cmd_buffer_subpass_split_for_barrier(primary,
1618 needs_bcl_barrier);
1619 v3dv_return_if_oom(primary, NULL);
1620
1621 /* Since we have created a new primary job we need to re-emit
1622 * the occlusion query state.
1623 */
1624 if (has_occlusion_query)
1625 v3dX(cmd_buffer_emit_occlusion_query)(primary);
1626 }
1627
1628 /* Make sure our primary job has all required BO references */
1629 set_foreach(secondary_job->bos, entry) {
1630 struct v3dv_bo *bo = (struct v3dv_bo *)entry->key;
1631 v3dv_job_add_bo(primary_job, bo);
1632 }
1633
1634 /* Emit required branch instructions. We expect each of these
1635 * to end with a corresponding 'return from sub list' item.
1636 */
1637 list_for_each_entry(struct v3dv_bo, bcl_bo,
1638 &secondary_job->bcl.bo_list, list_link) {
1639 v3dv_cl_ensure_space_with_branch(&primary_job->bcl,
1640 cl_packet_length(BRANCH_TO_SUB_LIST));
1641 v3dv_return_if_oom(primary, NULL);
1642 cl_emit(&primary_job->bcl, BRANCH_TO_SUB_LIST, branch) {
1643 branch.address = v3dv_cl_address(bcl_bo, 0);
1644 }
1645 }
1646
1647 primary_job->tmu_dirty_rcl |= secondary_job->tmu_dirty_rcl;
1648 } else {
1649 /* This is a regular job (CPU or GPU), so just finish the current
1650 * primary job (if any) and then add the secondary job to the
1651 * primary's job list right after it.
1652 */
1653 v3dv_cmd_buffer_finish_job(primary);
1654 v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
1655 if (pending_barrier) {
1656 secondary_job->serialize = true;
1657 if (pending_bcl_barrier)
1658 secondary_job->needs_bcl_sync = true;
1659 }
1660 }
1661
1662 pending_barrier = false;
1663 pending_bcl_barrier = false;
1664 }
1665
1666 /* If the secondary has recorded any vkCmdEndQuery commands, we need to
1667 * copy this state to the primary so it is processed properly when the
1668 * current primary job is finished.
1669 */
1670 cmd_buffer_copy_secondary_end_query_state(primary, secondary);
1671
1672 /* If this secondary had any pending barrier state, we will need that
1673 * barrier state consumed by whatever comes next in the primary.
1674 */
1675 assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
1676 pending_barrier = secondary->state.has_barrier;
1677 pending_bcl_barrier = secondary->state.has_bcl_barrier;
1678 }
1679
1680 if (pending_barrier) {
1681 primary->state.has_barrier = true;
1682 primary->state.has_bcl_barrier |= pending_bcl_barrier;
1683 }
1684 }
1685
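/* Packs the geometry shader state record into the job's indirect CL,
 * providing code and uniform addresses for both the binning and render
 * mode variants of the GS.
 */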
1686 static void
1687 emit_gs_shader_state_record(struct v3dv_job *job,
1688 struct v3dv_bo *assembly_bo,
1689 struct v3dv_shader_variant *gs_bin,
1690 struct v3dv_cl_reloc gs_bin_uniforms,
1691 struct v3dv_shader_variant *gs,
1692 struct v3dv_cl_reloc gs_render_uniforms)
1693 {
1694 cl_emit(&job->indirect, GEOMETRY_SHADER_STATE_RECORD, shader) {
1695 shader.geometry_bin_mode_shader_code_address =
1696 v3dv_cl_address(assembly_bo, gs_bin->assembly_offset);
1697 shader.geometry_bin_mode_shader_4_way_threadable =
1698 gs_bin->prog_data.gs->base.threads == 4;
1699 shader.geometry_bin_mode_shader_start_in_final_thread_section =
1700 gs_bin->prog_data.gs->base.single_seg;
1701 shader.geometry_bin_mode_shader_propagate_nans = true;
1702 shader.geometry_bin_mode_shader_uniforms_address =
1703 gs_bin_uniforms;
1704
1705 shader.geometry_render_mode_shader_code_address =
1706 v3dv_cl_address(assembly_bo, gs->assembly_offset);
1707 shader.geometry_render_mode_shader_4_way_threadable =
1708 gs->prog_data.gs->base.threads == 4;
1709 shader.geometry_render_mode_shader_start_in_final_thread_section =
1710 gs->prog_data.gs->base.single_seg;
1711 shader.geometry_render_mode_shader_propagate_nans = true;
1712 shader.geometry_render_mode_shader_uniforms_address =
1713 gs_render_uniforms;
1714 }
1715 }
1716
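/* Maps the GL output primitive type stored in the GS prog data to the
 * hardware's geometry shader output format.
 */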
1717 static uint8_t
1718 v3d_gs_output_primitive(uint32_t prim_type)
1719 {
1720 switch (prim_type) {
1721 case GL_POINTS:
1722 return GEOMETRY_SHADER_POINTS;
1723 case GL_LINE_STRIP:
1724 return GEOMETRY_SHADER_LINE_STRIP;
1725 case GL_TRIANGLE_STRIP:
1726 return GEOMETRY_SHADER_TRI_STRIP;
1727 default:
1728 unreachable("Unsupported primitive type");
1729 }
1730 }
1731
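/* The tessellation fields of the common params packet are filled with
 * fixed values here; only the GS output format and instance count carry
 * per-pipeline state.
 */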
1732 static void
1733 emit_tes_gs_common_params(struct v3dv_job *job,
1734 uint8_t gs_out_prim_type,
1735 uint8_t gs_num_invocations)
1736 {
1737 cl_emit(&job->indirect, TESSELLATION_GEOMETRY_COMMON_PARAMS, shader) {
1738 shader.tessellation_type = TESSELLATION_TYPE_TRIANGLE;
1739 shader.tessellation_point_mode = false;
1740 shader.tessellation_edge_spacing = TESSELLATION_EDGE_SPACING_EVEN;
1741 shader.tessellation_clockwise = true;
1742 shader.tessellation_invocations = 1;
1743
1744 shader.geometry_shader_output_format =
1745 v3d_gs_output_primitive(gs_out_prim_type);
1746 shader.geometry_shader_instances = gs_num_invocations & 0x1F;
1747 }
1748 }
1749
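/* Converts the GS dispatch SIMD width into the corresponding output
 * segment pack mode.
 */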
1750 static uint8_t
1751 simd_width_to_gs_pack_mode(uint32_t width)
1752 {
1753 switch (width) {
1754 case 16:
1755 return V3D_PACK_MODE_16_WAY;
1756 case 8:
1757 return V3D_PACK_MODE_8_WAY;
1758 case 4:
1759 return V3D_PACK_MODE_4_WAY;
1760 case 1:
1761 return V3D_PACK_MODE_1_WAY;
1762 default:
1763 unreachable("Invalid SIMD width");
1764 };
1765 }
1766
1767 static void
1768 emit_tes_gs_shader_params(struct v3dv_job *job,
1769 uint32_t gs_simd,
1770 uint32_t gs_vpm_output_size,
1771 uint32_t gs_max_vpm_input_size_per_batch)
1772 {
1773 cl_emit(&job->indirect, TESSELLATION_GEOMETRY_SHADER_PARAMS, shader) {
1774 shader.tcs_batch_flush_mode = V3D_TCS_FLUSH_MODE_FULLY_PACKED;
1775 shader.per_patch_data_column_depth = 1;
1776 shader.tcs_output_segment_size_in_sectors = 1;
1777 shader.tcs_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
1778 shader.tes_output_segment_size_in_sectors = 1;
1779 shader.tes_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
1780 shader.gs_output_segment_size_in_sectors = gs_vpm_output_size;
1781 shader.gs_output_segment_pack_mode =
1782 simd_width_to_gs_pack_mode(gs_simd);
1783 shader.tbg_max_patches_per_tcs_batch = 1;
1784 shader.tbg_max_extra_vertex_segs_for_patches_after_first = 0;
1785 shader.tbg_min_tcs_output_segments_required_in_play = 1;
1786 shader.tbg_min_per_patch_data_segments_required_in_play = 1;
1787 shader.tpg_max_patches_per_tes_batch = 1;
1788 shader.tpg_max_vertex_segments_per_tes_batch = 0;
1789 shader.tpg_max_tcs_output_segments_per_tes_batch = 1;
1790 shader.tpg_min_tes_output_segments_required_in_play = 1;
1791 shader.gbg_max_tes_output_vertex_segments_per_gs_batch =
1792 gs_max_vpm_input_size_per_batch;
1793 shader.gbg_min_gs_output_segments_required_in_play = 1;
1794 }
1795 }
1796
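/* Builds the GL shader state record (plus GS and tessellation/geometry
 * records when a GS is present) and the attribute records in the job's
 * indirect CL, then emits a GL_SHADER_STATE packet in the BCL pointing at
 * them.
 */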
1797 void
1798 v3dX(cmd_buffer_emit_gl_shader_state)(struct v3dv_cmd_buffer *cmd_buffer)
1799 {
1800 struct v3dv_job *job = cmd_buffer->state.job;
1801 assert(job);
1802
1803 struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1804 struct v3dv_pipeline *pipeline = state->gfx.pipeline;
1805 assert(pipeline);
1806
1807 struct v3dv_shader_variant *vs_variant =
1808 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
1809 struct v3d_vs_prog_data *prog_data_vs = vs_variant->prog_data.vs;
1810
1811 struct v3dv_shader_variant *vs_bin_variant =
1812 pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
1813 struct v3d_vs_prog_data *prog_data_vs_bin = vs_bin_variant->prog_data.vs;
1814
1815 struct v3dv_shader_variant *fs_variant =
1816 pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1817 struct v3d_fs_prog_data *prog_data_fs = fs_variant->prog_data.fs;
1818
1819 struct v3dv_shader_variant *gs_variant = NULL;
1820 struct v3dv_shader_variant *gs_bin_variant = NULL;
1821 struct v3d_gs_prog_data *prog_data_gs = NULL;
1822 struct v3d_gs_prog_data *prog_data_gs_bin = NULL;
1823 if (pipeline->has_gs) {
1824 gs_variant =
1825 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
1826 prog_data_gs = gs_variant->prog_data.gs;
1827
1828 gs_bin_variant =
1829 pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
1830 prog_data_gs_bin = gs_bin_variant->prog_data.gs;
1831 }
1832
1833 /* Update the cache dirty flag based on the shader progs data */
1834 job->tmu_dirty_rcl |= prog_data_vs_bin->base.tmu_dirty_rcl;
1835 job->tmu_dirty_rcl |= prog_data_vs->base.tmu_dirty_rcl;
1836 job->tmu_dirty_rcl |= prog_data_fs->base.tmu_dirty_rcl;
1837 if (pipeline->has_gs) {
1838 job->tmu_dirty_rcl |= prog_data_gs_bin->base.tmu_dirty_rcl;
1839 job->tmu_dirty_rcl |= prog_data_gs->base.tmu_dirty_rcl;
1840 }
1841
1842 /* See GFXH-930 workaround below */
1843 uint32_t num_elements_to_emit = MAX2(pipeline->va_count, 1);
1844
1845 uint32_t shader_state_record_length =
1846 cl_packet_length(GL_SHADER_STATE_RECORD);
1847 if (pipeline->has_gs) {
1848 shader_state_record_length +=
1849 cl_packet_length(GEOMETRY_SHADER_STATE_RECORD) +
1850 cl_packet_length(TESSELLATION_GEOMETRY_COMMON_PARAMS) +
1851 2 * cl_packet_length(TESSELLATION_GEOMETRY_SHADER_PARAMS);
1852 }
1853
1854 uint32_t shader_rec_offset =
1855 v3dv_cl_ensure_space(&job->indirect,
1856 shader_state_record_length +
1857 num_elements_to_emit *
1858 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
1859 32);
1860 v3dv_return_if_oom(cmd_buffer, NULL);
1861
1862 struct v3dv_bo *assembly_bo = pipeline->shared_data->assembly_bo;
1863
1864 if (pipeline->has_gs) {
1865 emit_gs_shader_state_record(job,
1866 assembly_bo,
1867 gs_bin_variant,
1868 cmd_buffer->state.uniforms.gs_bin,
1869 gs_variant,
1870 cmd_buffer->state.uniforms.gs);
1871
1872 emit_tes_gs_common_params(job,
1873 prog_data_gs->out_prim_type,
1874 prog_data_gs->num_invocations);
1875
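/* The shader params are emitted twice: first with the binning-mode VPM
 * configuration and then with the rendering-mode one.
 */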
1876 emit_tes_gs_shader_params(job,
1877 pipeline->vpm_cfg_bin.gs_width,
1878 pipeline->vpm_cfg_bin.Gd,
1879 pipeline->vpm_cfg_bin.Gv);
1880
1881 emit_tes_gs_shader_params(job,
1882 pipeline->vpm_cfg.gs_width,
1883 pipeline->vpm_cfg.Gd,
1884 pipeline->vpm_cfg.Gv);
1885 }
1886
1887 struct v3dv_bo *default_attribute_values =
1888 pipeline->default_attribute_values != NULL ?
1889 pipeline->default_attribute_values :
1890 pipeline->device->default_attribute_float;
1891
1892 cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_RECORD,
1893 pipeline->shader_state_record, shader) {
1894
1895 /* FIXME: we are setting these values both here and during
1896 * prepacking. This is because both cl_emit_with_prepacked and v3dvx_pack
1897 * assert on minimum values for these fields. It would be good to get
1898 * v3dvx_pack to assert on the final value instead, if possible.
1899 */
1900 shader.min_coord_shader_input_segments_required_in_play =
1901 pipeline->vpm_cfg_bin.As;
1902 shader.min_vertex_shader_input_segments_required_in_play =
1903 pipeline->vpm_cfg.As;
1904
1905 shader.coordinate_shader_code_address =
1906 v3dv_cl_address(assembly_bo, vs_bin_variant->assembly_offset);
1907 shader.vertex_shader_code_address =
1908 v3dv_cl_address(assembly_bo, vs_variant->assembly_offset);
1909 shader.fragment_shader_code_address =
1910 v3dv_cl_address(assembly_bo, fs_variant->assembly_offset);
1911
1912 shader.coordinate_shader_uniforms_address = cmd_buffer->state.uniforms.vs_bin;
1913 shader.vertex_shader_uniforms_address = cmd_buffer->state.uniforms.vs;
1914 shader.fragment_shader_uniforms_address = cmd_buffer->state.uniforms.fs;
1915
1916 shader.address_of_default_attribute_values =
1917 v3dv_cl_address(default_attribute_values, 0);
1918
1919 shader.any_shader_reads_hardware_written_primitive_id =
1920 (pipeline->has_gs && prog_data_gs->uses_pid) || prog_data_fs->uses_pid;
1921 shader.insert_primitive_id_as_first_varying_to_fragment_shader =
1922 !pipeline->has_gs && prog_data_fs->uses_pid;
1923 }
1924
1925 /* Upload vertex element attributes (SHADER_STATE_ATTRIBUTE_RECORD) */
1926 bool cs_loaded_any = false;
1927 const bool cs_uses_builtins = prog_data_vs_bin->uses_iid ||
1928 prog_data_vs_bin->uses_biid ||
1929 prog_data_vs_bin->uses_vid;
1930 const uint32_t packet_length =
1931 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
1932
1933 uint32_t emitted_va_count = 0;
1934 for (uint32_t i = 0; emitted_va_count < pipeline->va_count; i++) {
1935 assert(i < MAX_VERTEX_ATTRIBS);
1936
1937 if (pipeline->va[i].vk_format == VK_FORMAT_UNDEFINED)
1938 continue;
1939
1940 const uint32_t binding = pipeline->va[i].binding;
1941
1942 /* We store each vertex attribute in the array using its driver location
1943 * as index.
1944 */
1945 const uint32_t location = i;
1946
1947 struct v3dv_vertex_binding *c_vb = &cmd_buffer->state.vertex_bindings[binding];
1948
1949 cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD,
1950 &pipeline->vertex_attrs[i * packet_length], attr) {
1951
1952 assert(c_vb->buffer->mem->bo);
1953 attr.address = v3dv_cl_address(c_vb->buffer->mem->bo,
1954 c_vb->buffer->mem_offset +
1955 pipeline->va[i].offset +
1956 c_vb->offset);
1957
1958 attr.number_of_values_read_by_coordinate_shader =
1959 prog_data_vs_bin->vattr_sizes[location];
1960 attr.number_of_values_read_by_vertex_shader =
1961 prog_data_vs->vattr_sizes[location];
1962
1963 /* GFXH-930: At least one attribute must be enabled and read by CS
1964 * and VS. If we have attributes being consumed by the VS but not
1965 * the CS, then set up a dummy load of the last attribute into the
1966 * CS's VPM inputs. (Since CS is just dead-code-elimination compared
1967 * to VS, we can't have CS loading but not VS).
1968 *
1969 * GFXH-1602: first attribute must be active if using builtins.
1970 */
1971 if (prog_data_vs_bin->vattr_sizes[location])
1972 cs_loaded_any = true;
1973
1974 if (i == 0 && cs_uses_builtins && !cs_loaded_any) {
1975 attr.number_of_values_read_by_coordinate_shader = 1;
1976 cs_loaded_any = true;
1977 } else if (i == pipeline->va_count - 1 && !cs_loaded_any) {
1978 attr.number_of_values_read_by_coordinate_shader = 1;
1979 cs_loaded_any = true;
1980 }
1981
1982 attr.maximum_index = 0xffffff;
1983 }
1984
1985 emitted_va_count++;
1986 }
1987
1988 if (pipeline->va_count == 0) {
1989 /* GFXH-930: At least one attribute must be enabled and read
1990 * by CS and VS. If we have no attributes being consumed by
1991 * the shader, set up a dummy to be loaded into the VPM.
1992 */
1993 cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
1994 /* Valid address of data whose value will be unused. */
1995 attr.address = v3dv_cl_address(job->indirect.bo, 0);
1996
1997 attr.type = ATTRIBUTE_FLOAT;
1998 attr.stride = 0;
1999 attr.vec_size = 1;
2000
2001 attr.number_of_values_read_by_coordinate_shader = 1;
2002 attr.number_of_values_read_by_vertex_shader = 1;
2003 }
2004 }
2005
2006 if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
2007 v3dv_cl_ensure_space_with_branch(&job->bcl,
2008 sizeof(pipeline->vcm_cache_size));
2009 v3dv_return_if_oom(cmd_buffer, NULL);
2010
2011 cl_emit_prepacked(&job->bcl, &pipeline->vcm_cache_size);
2012 }
2013
2014 v3dv_cl_ensure_space_with_branch(&job->bcl,
2015 cl_packet_length(GL_SHADER_STATE));
2016 v3dv_return_if_oom(cmd_buffer, NULL);
2017
2018 if (pipeline->has_gs) {
2019 cl_emit(&job->bcl, GL_SHADER_STATE_INCLUDING_GS, state) {
2020 state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
2021 state.number_of_attribute_arrays = num_elements_to_emit;
2022 }
2023 } else {
2024 cl_emit(&job->bcl, GL_SHADER_STATE, state) {
2025 state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
2026 state.number_of_attribute_arrays = num_elements_to_emit;
2027 }
2028 }
2029
2030 cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_VERTEX_BUFFER |
2031 V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
2032 V3DV_CMD_DIRTY_PUSH_CONSTANTS);
2033 cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
2034 cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
2035 }
2036
2037 /* FIXME: C&P from v3dx_draw. Refactor to common place? */
2038 static uint32_t
2039 v3d_hw_prim_type(enum pipe_prim_type prim_type)
2040 {
2041 switch (prim_type) {
2042 case PIPE_PRIM_POINTS:
2043 case PIPE_PRIM_LINES:
2044 case PIPE_PRIM_LINE_LOOP:
2045 case PIPE_PRIM_LINE_STRIP:
2046 case PIPE_PRIM_TRIANGLES:
2047 case PIPE_PRIM_TRIANGLE_STRIP:
2048 case PIPE_PRIM_TRIANGLE_FAN:
2049 return prim_type;
2050
2051 case PIPE_PRIM_LINES_ADJACENCY:
2052 case PIPE_PRIM_LINE_STRIP_ADJACENCY:
2053 case PIPE_PRIM_TRIANGLES_ADJACENCY:
2054 case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
2055 return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);
2056
2057 default:
2058 unreachable("Unsupported primitive type");
2059 }
2060 }
2061
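/* Emits a non-indexed draw: an optional BASE_VERTEX_BASE_INSTANCE packet
 * when a non-zero first instance is used, followed by either the
 * instanced or the plain vertex array primitive packet.
 */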
2062 void
2063 v3dX(cmd_buffer_emit_draw)(struct v3dv_cmd_buffer *cmd_buffer,
2064 struct v3dv_draw_info *info)
2065 {
2066 struct v3dv_job *job = cmd_buffer->state.job;
2067 assert(job);
2068
2069 struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2070 struct v3dv_pipeline *pipeline = state->gfx.pipeline;
2071
2072 assert(pipeline);
2073
2074 uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2075
2076 if (info->first_instance > 0) {
2077 v3dv_cl_ensure_space_with_branch(
2078 &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
2079 v3dv_return_if_oom(cmd_buffer, NULL);
2080
2081 cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
2082 base.base_instance = info->first_instance;
2083 base.base_vertex = 0;
2084 }
2085 }
2086
2087 if (info->instance_count > 1) {
2088 v3dv_cl_ensure_space_with_branch(
2089 &job->bcl, cl_packet_length(VERTEX_ARRAY_INSTANCED_PRIMS));
2090 v3dv_return_if_oom(cmd_buffer, NULL);
2091
2092 cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
2093 prim.mode = hw_prim_type;
2094 prim.index_of_first_vertex = info->first_vertex;
2095 prim.number_of_instances = info->instance_count;
2096 prim.instance_length = info->vertex_count;
2097 }
2098 } else {
2099 v3dv_cl_ensure_space_with_branch(
2100 &job->bcl, cl_packet_length(VERTEX_ARRAY_PRIMS));
2101 v3dv_return_if_oom(cmd_buffer, NULL);
2102 cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
2103 prim.mode = hw_prim_type;
2104 prim.length = info->vertex_count;
2105 prim.index_of_first_vertex = info->first_vertex;
2106 }
2107 }
2108 }
2109
2110 void
2111 v3dX(cmd_buffer_emit_index_buffer)(struct v3dv_cmd_buffer *cmd_buffer)
2112 {
2113 struct v3dv_job *job = cmd_buffer->state.job;
2114 assert(job);
2115
2116 /* We flag all state as dirty when we create a new job so make sure we
2117 * have a valid index buffer before attempting to emit state for it.
2118 */
2119 struct v3dv_buffer *ibuffer =
2120 v3dv_buffer_from_handle(cmd_buffer->state.index_buffer.buffer);
2121 if (ibuffer) {
2122 v3dv_cl_ensure_space_with_branch(
2123 &job->bcl, cl_packet_length(INDEX_BUFFER_SETUP));
2124 v3dv_return_if_oom(cmd_buffer, NULL);
2125
2126 const uint32_t offset = cmd_buffer->state.index_buffer.offset;
2127 cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
2128 ib.address = v3dv_cl_address(ibuffer->mem->bo,
2129 ibuffer->mem_offset + offset);
2130 ib.size = ibuffer->mem->bo->size;
2131 }
2132 }
2133
2134 cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_INDEX_BUFFER;
2135 }
2136
2137 void
2138 v3dX(cmd_buffer_emit_draw_indexed)(struct v3dv_cmd_buffer *cmd_buffer,
2139 uint32_t indexCount,
2140 uint32_t instanceCount,
2141 uint32_t firstIndex,
2142 int32_t vertexOffset,
2143 uint32_t firstInstance)
2144 {
2145 struct v3dv_job *job = cmd_buffer->state.job;
2146 assert(job);
2147
2148 const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2149 uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
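/* ffs() maps the index size in bytes (1, 2 or 4) to the index type
 * encoding used by the primitive list packets (0, 1 or 2).
 */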
2150 uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
2151 uint32_t index_offset = firstIndex * cmd_buffer->state.index_buffer.index_size;
2152
2153 if (vertexOffset != 0 || firstInstance != 0) {
2154 v3dv_cl_ensure_space_with_branch(
2155 &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
2156 v3dv_return_if_oom(cmd_buffer, NULL);
2157
2158 cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
2159 base.base_instance = firstInstance;
2160 base.base_vertex = vertexOffset;
2161 }
2162 }
2163
2164 if (instanceCount == 1) {
2165 v3dv_cl_ensure_space_with_branch(
2166 &job->bcl, cl_packet_length(INDEXED_PRIM_LIST));
2167 v3dv_return_if_oom(cmd_buffer, NULL);
2168
2169 cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
2170 prim.index_type = index_type;
2171 prim.length = indexCount;
2172 prim.index_offset = index_offset;
2173 prim.mode = hw_prim_type;
2174 prim.enable_primitive_restarts = pipeline->primitive_restart;
2175 }
2176 } else if (instanceCount > 1) {
2177 v3dv_cl_ensure_space_with_branch(
2178 &job->bcl, cl_packet_length(INDEXED_INSTANCED_PRIM_LIST));
2179 v3dv_return_if_oom(cmd_buffer, NULL);
2180
2181 cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
2182 prim.index_type = index_type;
2183 prim.index_offset = index_offset;
2184 prim.mode = hw_prim_type;
2185 prim.enable_primitive_restarts = pipeline->primitive_restart;
2186 prim.number_of_instances = instanceCount;
2187 prim.instance_length = indexCount;
2188 }
2189 }
2190 }
2191
2192 void
2193 v3dX(cmd_buffer_emit_draw_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
2194 struct v3dv_buffer *buffer,
2195 VkDeviceSize offset,
2196 uint32_t drawCount,
2197 uint32_t stride)
2198 {
2199 struct v3dv_job *job = cmd_buffer->state.job;
2200 assert(job);
2201
2202 const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2203 uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2204
2205 v3dv_cl_ensure_space_with_branch(
2206 &job->bcl, cl_packet_length(INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS));
2207 v3dv_return_if_oom(cmd_buffer, NULL);
2208
2209 cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
2210 prim.mode = hw_prim_type;
2211 prim.number_of_draw_indirect_array_records = drawCount;
2212 prim.stride_in_multiples_of_4_bytes = stride >> 2;
2213 prim.address = v3dv_cl_address(buffer->mem->bo,
2214 buffer->mem_offset + offset);
2215 }
2216 }
2217
2218 void
2219 v3dX(cmd_buffer_emit_indexed_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
2220 struct v3dv_buffer *buffer,
2221 VkDeviceSize offset,
2222 uint32_t drawCount,
2223 uint32_t stride)
2224 {
2225 struct v3dv_job *job = cmd_buffer->state.job;
2226 assert(job);
2227
2228 const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
2229 uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
2230 uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
2231
2232 v3dv_cl_ensure_space_with_branch(
2233 &job->bcl, cl_packet_length(INDIRECT_INDEXED_INSTANCED_PRIM_LIST));
2234 v3dv_return_if_oom(cmd_buffer, NULL);
2235
2236 cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
2237 prim.index_type = index_type;
2238 prim.mode = hw_prim_type;
2239 prim.enable_primitive_restarts = pipeline->primitive_restart;
2240 prim.number_of_draw_indirect_indexed_records = drawCount;
2241 prim.stride_in_multiples_of_4_bytes = stride >> 2;
2242 prim.address = v3dv_cl_address(buffer->mem->bo,
2243 buffer->mem_offset + offset);
2244 }
2245 }
2246
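/* Looks up the subpass color attachment for render target 'rt' and
 * returns its internal bpp, type and clamp mode; integer formats use
 * integer clamping and sRGB formats use normalized clamping.
 */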
2247 void
2248 v3dX(cmd_buffer_render_pass_setup_render_target)(struct v3dv_cmd_buffer *cmd_buffer,
2249 int rt,
2250 uint32_t *rt_bpp,
2251 uint32_t *rt_type,
2252 uint32_t *rt_clamp)
2253 {
2254 const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
2255
2256 assert(state->subpass_idx < state->pass->subpass_count);
2257 const struct v3dv_subpass *subpass =
2258 &state->pass->subpasses[state->subpass_idx];
2259
2260 if (rt >= subpass->color_count)
2261 return;
2262
2263 struct v3dv_subpass_attachment *attachment = &subpass->color_attachments[rt];
2264 const uint32_t attachment_idx = attachment->attachment;
2265 if (attachment_idx == VK_ATTACHMENT_UNUSED)
2266 return;
2267
2268 const struct v3dv_framebuffer *framebuffer = state->framebuffer;
2269 assert(attachment_idx < framebuffer->attachment_count);
2270 struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
2271 assert(iview->vk.aspects & VK_IMAGE_ASPECT_COLOR_BIT);
2272
2273 *rt_bpp = iview->internal_bpp;
2274 *rt_type = iview->internal_type;
2275 if (vk_format_is_int(iview->vk.format))
2276 *rt_clamp = V3D_RENDER_TARGET_CLAMP_INT;
2277 else if (vk_format_is_srgb(iview->vk.format))
2278 *rt_clamp = V3D_RENDER_TARGET_CLAMP_NORM;
2279 else
2280 *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
2281 }
2282