1 /*
2  * Copyright 2012 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * on the rights to use, copy, modify, merge, publish, distribute, sub
9  * license, and/or sell copies of the Software, and to permit persons to whom
10  * the Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #include "ac_nir.h"
26 #include "ac_sqtt.h"
27 #include "si_build_pm4.h"
28 #include "util/u_cpu_detect.h"
29 #include "util/u_index_modify.h"
30 #include "util/u_prim.h"
31 #include "util/u_upload_mgr.h"
32 
33 #if (GFX_VER == 6)
34 #define GFX(name) name##GFX6
35 #elif (GFX_VER == 7)
36 #define GFX(name) name##GFX7
37 #elif (GFX_VER == 8)
38 #define GFX(name) name##GFX8
39 #elif (GFX_VER == 9)
40 #define GFX(name) name##GFX9
41 #elif (GFX_VER == 10)
42 #define GFX(name) name##GFX10
43 #elif (GFX_VER == 103)
44 #define GFX(name) name##GFX10_3
45 #else
46 #error "Unknown gfx version"
47 #endif
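/* This translation unit appears to be compiled once per GFX_VER, with the
 * template parameters below resolved at compile time; GFX(name) simply
 * appends the gfx-version suffix (e.g. GFX(foo) expands to fooGFX9) so each
 * build exports uniquely named entry points. The exact build setup is an
 * assumption here and is not shown in this file.
 */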
48 
49 /* special primitive types */
50 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
51 
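/* Build and emit SPI_PS_INPUT_CNTL_0..NUM_INTERP-1, which route each PS
 * input to the matching output of the last pre-rasterizer stage. NUM_INTERP
 * is a template parameter, so a specialized variant exists per PS input
 * count and is selected via sctx->emit_spi_map[] in si_update_shaders below.
 */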
52 template<int NUM_INTERP>
static void si_emit_spi_map(struct si_context *sctx)
54 {
55    struct si_shader *ps = sctx->shader.ps.current;
56    struct si_shader_info *psinfo = ps ? &ps->selector->info : NULL;
57    unsigned spi_ps_input_cntl[NUM_INTERP];
58 
59    STATIC_ASSERT(NUM_INTERP >= 0 && NUM_INTERP <= 32);
60 
61    if (!NUM_INTERP)
62       return;
63 
64    struct si_shader *vs = si_get_vs(sctx)->current;
65    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
66 
67    for (unsigned i = 0; i < NUM_INTERP; i++) {
68       union si_input_info input = psinfo->input[i];
69       unsigned ps_input_cntl = vs->info.vs_output_ps_input_cntl[input.semantic];
70       bool non_default_val = G_028644_OFFSET(ps_input_cntl) != 0x20;
71 
72       if (non_default_val) {
73          if (input.interpolate == INTERP_MODE_FLAT ||
74              (input.interpolate == INTERP_MODE_COLOR && rs->flatshade))
75             ps_input_cntl |= S_028644_FLAT_SHADE(1);
76 
77          if (input.fp16_lo_hi_valid) {
78             ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
79                              S_028644_ATTR0_VALID(1) | /* this must be set if FP16_INTERP_MODE is set */
80                              S_028644_ATTR1_VALID(!!(input.fp16_lo_hi_valid & 0x2));
81          }
82       }
83 
84       if (input.semantic == VARYING_SLOT_PNTC ||
85           (input.semantic >= VARYING_SLOT_TEX0 && input.semantic <= VARYING_SLOT_TEX7 &&
86            rs->sprite_coord_enable & (1 << (input.semantic - VARYING_SLOT_TEX0)))) {
87          /* Overwrite the whole value (except OFFSET) for sprite coordinates. */
88          ps_input_cntl &= ~C_028644_OFFSET;
89          ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
90          if (input.fp16_lo_hi_valid & 0x1) {
91             ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
92                              S_028644_ATTR0_VALID(1);
93          }
94       }
95 
96       spi_ps_input_cntl[i] = ps_input_cntl;
97    }
98 
99    /* R_028644_SPI_PS_INPUT_CNTL_0 */
100    /* Dota 2: Only ~16% of SPI map updates set different values. */
101    /* Talos: Only ~9% of SPI map updates set different values. */
102    radeon_begin(&sctx->gfx_cs);
103    radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0, spi_ps_input_cntl,
104                                sctx->tracked_regs.spi_ps_input_cntl, NUM_INTERP);
105    radeon_end_update_context_roll(sctx);
106 }
107 
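/* Select the shader variants for the current pipeline configuration and bind
 * them to the hw stages (LS/HS/ES/GS/VS/PS), with HAS_TESS/HAS_GS/NGG known
 * at compile time. Also updates derived state: VGT_SHADER_STAGES_EN, the SPI
 * map atom, scratch size, and the L2 prefetch mask. Returns false on failure
 * (e.g. shader compilation or ring allocation errors).
 */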
108 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static bool si_update_shaders(struct si_context *sctx)
110 {
111    struct pipe_context *ctx = (struct pipe_context *)sctx;
112    struct si_shader *old_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current;
113    unsigned old_pa_cl_vs_out_cntl = old_vs ? old_vs->pa_cl_vs_out_cntl : 0;
114    struct si_shader *old_ps = sctx->shader.ps.current;
115    unsigned old_spi_shader_col_format =
116       old_ps ? old_ps->key.ps.part.epilog.spi_shader_col_format : 0;
117    int r;
118 
119    /* Update TCS and TES. */
120    if (HAS_TESS) {
121       if (!sctx->tess_rings) {
122          si_init_tess_factor_ring(sctx);
123          if (!sctx->tess_rings)
124             return false;
125       }
126 
127       if (sctx->shader.tcs.cso) {
128          r = si_shader_select(ctx, &sctx->shader.tcs);
129          if (r)
130             return false;
131          si_pm4_bind_state(sctx, hs, sctx->shader.tcs.current);
132       } else {
133          if (!sctx->fixed_func_tcs_shader.cso) {
134             sctx->fixed_func_tcs_shader.cso =
135                (struct si_shader_selector*)si_create_fixed_func_tcs(sctx);
136             if (!sctx->fixed_func_tcs_shader.cso)
137                return false;
138 
139             sctx->fixed_func_tcs_shader.key.ge.part.tcs.epilog.invoc0_tess_factors_are_def =
140                sctx->fixed_func_tcs_shader.cso->info.tessfactors_are_def_in_all_invocs;
141          }
142 
143          r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
144          if (r)
145             return false;
146          si_pm4_bind_state(sctx, hs, sctx->fixed_func_tcs_shader.current);
147       }
148 
149       if (!HAS_GS || GFX_VERSION <= GFX8) {
150          r = si_shader_select(ctx, &sctx->shader.tes);
151          if (r)
152             return false;
153 
154          if (HAS_GS) {
155             /* TES as ES */
156             assert(GFX_VERSION <= GFX8);
157             si_pm4_bind_state(sctx, es, sctx->shader.tes.current);
158          } else if (NGG) {
159             si_pm4_bind_state(sctx, gs, sctx->shader.tes.current);
160          } else {
161             si_pm4_bind_state(sctx, vs, sctx->shader.tes.current);
162          }
163       }
164    } else {
165       if (GFX_VERSION <= GFX8) {
166          si_pm4_bind_state(sctx, ls, NULL);
167          sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
168       }
169       si_pm4_bind_state(sctx, hs, NULL);
170       sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
171    }
172 
173    /* Update GS. */
174    if (HAS_GS) {
175       r = si_shader_select(ctx, &sctx->shader.gs);
176       if (r)
177          return false;
178       si_pm4_bind_state(sctx, gs, sctx->shader.gs.current);
179       if (!NGG) {
180          si_pm4_bind_state(sctx, vs, sctx->shader.gs.current->gs_copy_shader);
181 
182          if (!si_update_gs_ring_buffers(sctx))
183             return false;
184       } else {
185          si_pm4_bind_state(sctx, vs, NULL);
186          sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
187       }
188    } else {
189       if (!NGG) {
190          si_pm4_bind_state(sctx, gs, NULL);
191          sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
192          if (GFX_VERSION <= GFX8) {
193             si_pm4_bind_state(sctx, es, NULL);
194             sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
195          }
196       }
197    }
198 
199    /* Update VS. */
200    if ((!HAS_TESS && !HAS_GS) || GFX_VERSION <= GFX8) {
201       r = si_shader_select(ctx, &sctx->shader.vs);
202       if (r)
203          return false;
204 
205       if (!HAS_TESS && !HAS_GS) {
206          if (NGG) {
207             si_pm4_bind_state(sctx, gs, sctx->shader.vs.current);
208             si_pm4_bind_state(sctx, vs, NULL);
209             sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
210          } else {
211             si_pm4_bind_state(sctx, vs, sctx->shader.vs.current);
212          }
213       } else if (HAS_TESS) {
214          si_pm4_bind_state(sctx, ls, sctx->shader.vs.current);
215       } else {
216          assert(HAS_GS);
217          si_pm4_bind_state(sctx, es, sctx->shader.vs.current);
218       }
219    }
220 
221    if (GFX_VERSION >= GFX9 && HAS_TESS)
222       sctx->vs_uses_base_instance = sctx->queued.named.hs->uses_base_instance;
223    else if (GFX_VERSION >= GFX9 && HAS_GS)
224       sctx->vs_uses_base_instance = sctx->shader.gs.current->uses_base_instance;
225    else
226       sctx->vs_uses_base_instance = sctx->shader.vs.current->uses_base_instance;
227 
228    union si_vgt_stages_key key;
229    key.index = 0;
230 
231    /* Update VGT_SHADER_STAGES_EN. */
232    if (HAS_TESS) {
233       key.u.tess = 1;
234       if (GFX_VERSION >= GFX10)
235          key.u.hs_wave32 = sctx->queued.named.hs->wave_size == 32;
236    }
237    if (HAS_GS)
238       key.u.gs = 1;
239    if (NGG) {
240       key.index |= si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->ctx_reg.ngg.vgt_stages.index;
241    } else if (GFX_VERSION >= GFX10) {
242       if (HAS_GS) {
243          key.u.gs_wave32 = sctx->shader.gs.current->wave_size == 32;
244          key.u.vs_wave32 = sctx->shader.gs.current->gs_copy_shader->wave_size == 32;
245       } else {
246          key.u.vs_wave32 = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->wave_size == 32;
247       }
248    }
249 
250    struct si_pm4_state **pm4 = &sctx->vgt_shader_config[key.index];
251    if (unlikely(!*pm4))
252       *pm4 = si_build_vgt_shader_config(sctx->screen, key);
253    si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
254 
255    if (old_pa_cl_vs_out_cntl !=
256           si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->pa_cl_vs_out_cntl)
257       si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
258 
259    r = si_shader_select(ctx, &sctx->shader.ps);
260    if (r)
261       return false;
262    si_pm4_bind_state(sctx, ps, sctx->shader.ps.current);
263 
264    if (si_pm4_state_changed(sctx, ps) ||
265        (!NGG && si_pm4_state_changed(sctx, vs)) ||
266        (NGG && si_pm4_state_changed(sctx, gs))) {
267       sctx->atoms.s.spi_map.emit = sctx->emit_spi_map[sctx->shader.ps.current->ctx_reg.ps.num_interp];
268       si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
269    }
270 
271    if ((GFX_VERSION >= GFX10_3 || (GFX_VERSION >= GFX9 && sctx->screen->info.rbplus_allowed)) &&
272        si_pm4_state_changed(sctx, ps) &&
273        (!old_ps || old_spi_shader_col_format !=
274                       sctx->shader.ps.current->key.ps.part.epilog.spi_shader_col_format))
275       si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
276 
277    if (sctx->smoothing_enabled !=
278        sctx->shader.ps.current->key.ps.mono.poly_line_smoothing) {
279       sctx->smoothing_enabled = sctx->shader.ps.current->key.ps.mono.poly_line_smoothing;
280       si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
281 
282       /* NGG cull state uses smoothing_enabled. */
283       if (GFX_VERSION >= GFX10 && sctx->screen->use_ngg_culling)
284          si_mark_atom_dirty(sctx, &sctx->atoms.s.ngg_cull_state);
285 
286       if (GFX_VERSION == GFX6)
287          si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
288 
289       if (sctx->framebuffer.nr_samples <= 1)
290          si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
291    }
292 
293    if (unlikely(sctx->screen->debug_flags & DBG(SQTT) && sctx->thread_trace)) {
294       /* Pretend the bound shaders form a vk pipeline */
295       uint32_t pipeline_code_hash = 0;
296       uint64_t base_address = ~0;
297 
298       for (int i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
299          struct si_shader *shader = sctx->shaders[i].current;
300          if (sctx->shaders[i].cso && shader) {
301             pipeline_code_hash = _mesa_hash_data_with_seed(
302                shader->binary.elf_buffer,
303                shader->binary.elf_size,
304                pipeline_code_hash);
305             base_address = MIN2(base_address,
306                                 shader->bo->gpu_address);
307          }
308       }
309 
310       struct ac_thread_trace_data *thread_trace_data = sctx->thread_trace;
311       if (!si_sqtt_pipeline_is_registered(thread_trace_data, pipeline_code_hash)) {
312          si_sqtt_register_pipeline(sctx, pipeline_code_hash, base_address, false);
313       }
314 
315       si_sqtt_describe_pipeline_bind(sctx, pipeline_code_hash, 0);
316    }
317 
318    if ((GFX_VERSION <= GFX8 &&
319         (si_pm4_state_enabled_and_changed(sctx, ls) || si_pm4_state_enabled_and_changed(sctx, es))) ||
320        si_pm4_state_enabled_and_changed(sctx, hs) || si_pm4_state_enabled_and_changed(sctx, gs) ||
321        si_pm4_state_enabled_and_changed(sctx, vs) || si_pm4_state_enabled_and_changed(sctx, ps)) {
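      /* Recompute the scratch requirement as the maximum of
       * scratch_bytes_per_wave over all bound hw stages. The GFX8-and-older
       * cases account for the separate LS/ES stages that are merged into
       * HS/GS on GFX9 and later.
       */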
322       unsigned scratch_size = 0;
323 
324       if (HAS_TESS) {
325          if (GFX_VERSION <= GFX8) /* LS */
326             scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
327 
328          scratch_size = MAX2(scratch_size, sctx->queued.named.hs->config.scratch_bytes_per_wave);
329 
330          if (HAS_GS) {
331             if (GFX_VERSION <= GFX8) /* ES */
332                scratch_size = MAX2(scratch_size, sctx->shader.tes.current->config.scratch_bytes_per_wave);
333 
334             scratch_size = MAX2(scratch_size, sctx->shader.gs.current->config.scratch_bytes_per_wave);
335          } else {
336             scratch_size = MAX2(scratch_size, sctx->shader.tes.current->config.scratch_bytes_per_wave);
337          }
338       } else if (HAS_GS) {
339          if (GFX_VERSION <= GFX8) /* ES */
340             scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
341 
342          scratch_size = MAX2(scratch_size, sctx->shader.gs.current->config.scratch_bytes_per_wave);
343       } else {
344          scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
345       }
346 
347       scratch_size = MAX2(scratch_size, sctx->shader.ps.current->config.scratch_bytes_per_wave);
348 
349       if (scratch_size && !si_update_spi_tmpring_size(sctx, scratch_size))
350          return false;
351 
352       if (GFX_VERSION >= GFX7) {
353          if (GFX_VERSION <= GFX8 && HAS_TESS && si_pm4_state_enabled_and_changed(sctx, ls))
354             sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
355 
356          if (HAS_TESS && si_pm4_state_enabled_and_changed(sctx, hs))
357             sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
358 
359          if (GFX_VERSION <= GFX8 && HAS_GS && si_pm4_state_enabled_and_changed(sctx, es))
360             sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
361 
362          if ((HAS_GS || NGG) && si_pm4_state_enabled_and_changed(sctx, gs))
363             sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
364 
365          if (!NGG && si_pm4_state_enabled_and_changed(sctx, vs))
366             sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
367 
368          if (si_pm4_state_enabled_and_changed(sctx, ps))
369             sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
370       }
371    }
372 
373    sctx->do_update_shaders = false;
374    return true;
375 }
376 
377 ALWAYS_INLINE
static unsigned si_conv_pipe_prim(unsigned mode)
379 {
380    static const unsigned prim_conv[] = {
381       [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
382       [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
383       [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
384       [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
385       [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
386       [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
387       [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
388       [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
389       [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
390       [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
391       [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
392       [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
393       [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
394       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
395       [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
396       [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST};
397    assert(mode < ARRAY_SIZE(prim_conv));
398    return prim_conv[mode];
399 }
400 
static void si_prefetch_shader_async(struct si_context *sctx, struct si_shader *shader)
402 {
403    struct pipe_resource *bo = &shader->bo->b.b;
404 
405    si_cp_dma_prefetch(sctx, bo, 0, bo->width0);
406 }
407 
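/* How much of the shader prefetch to do around a draw: BEFORE_DRAW prefetches
 * only the first stage of the pipeline and returns, AFTER_DRAW skips that
 * first stage and prefetches the rest, and ALL does everything in one call.
 */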
408 enum si_L2_prefetch_mode {
409    PREFETCH_BEFORE_DRAW = 1,
410    PREFETCH_AFTER_DRAW,
411    PREFETCH_ALL,
412 };
413 
414 /**
415  * Prefetch shaders.
416  */
417 template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
418          si_L2_prefetch_mode mode>
static void si_prefetch_shaders(struct si_context *sctx)
420 {
421    unsigned mask = sctx->prefetch_L2_mask;
422 
423    /* GFX6 doesn't support the L2 prefetch. */
424    if (GFX_VERSION < GFX7 || !mask)
425       return;
426 
427    /* Prefetch shaders and VBO descriptors to TC L2. */
428    if (GFX_VERSION >= GFX9) {
429       /* Choose the right spot for the VBO prefetch. */
430       if (HAS_TESS) {
431          if (mode != PREFETCH_AFTER_DRAW) {
432             if (mask & SI_PREFETCH_HS)
433                si_prefetch_shader_async(sctx, sctx->queued.named.hs);
434 
435             if (mode == PREFETCH_BEFORE_DRAW)
436                return;
437          }
438 
439          if ((HAS_GS || NGG) && mask & SI_PREFETCH_GS)
440             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
441          if (!NGG && mask & SI_PREFETCH_VS)
442             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
443       } else if (HAS_GS || NGG) {
444          if (mode != PREFETCH_AFTER_DRAW) {
445             if (mask & SI_PREFETCH_GS)
446                si_prefetch_shader_async(sctx, sctx->queued.named.gs);
447 
448             if (mode == PREFETCH_BEFORE_DRAW)
449                return;
450          }
451 
452          if (!NGG && mask & SI_PREFETCH_VS)
453             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
454       } else {
455          if (mode != PREFETCH_AFTER_DRAW) {
456             if (mask & SI_PREFETCH_VS)
457                si_prefetch_shader_async(sctx, sctx->queued.named.vs);
458 
459             if (mode == PREFETCH_BEFORE_DRAW)
460                return;
461          }
462       }
463    } else {
464       /* GFX6-GFX8 */
465       /* Choose the right spot for the VBO prefetch. */
466       if (HAS_TESS) {
467          if (mode != PREFETCH_AFTER_DRAW) {
468             if (mask & SI_PREFETCH_LS)
469                si_prefetch_shader_async(sctx, sctx->queued.named.ls);
470 
471             if (mode == PREFETCH_BEFORE_DRAW)
472                return;
473          }
474 
475          if (mask & SI_PREFETCH_HS)
476             si_prefetch_shader_async(sctx, sctx->queued.named.hs);
477          if (mask & SI_PREFETCH_ES)
478             si_prefetch_shader_async(sctx, sctx->queued.named.es);
479          if (mask & SI_PREFETCH_GS)
480             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
481          if (mask & SI_PREFETCH_VS)
482             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
483       } else if (HAS_GS) {
484          if (mode != PREFETCH_AFTER_DRAW) {
485             if (mask & SI_PREFETCH_ES)
486                si_prefetch_shader_async(sctx, sctx->queued.named.es);
487 
488             if (mode == PREFETCH_BEFORE_DRAW)
489                return;
490          }
491 
492          if (mask & SI_PREFETCH_GS)
493             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
494          if (mask & SI_PREFETCH_VS)
495             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
496       } else {
497          if (mode != PREFETCH_AFTER_DRAW) {
498             if (mask & SI_PREFETCH_VS)
499                si_prefetch_shader_async(sctx, sctx->queued.named.vs);
500 
501             if (mode == PREFETCH_BEFORE_DRAW)
502                return;
503          }
504       }
505    }
506 
507    if (mask & SI_PREFETCH_PS)
508       si_prefetch_shader_async(sctx, sctx->queued.named.ps);
509 
510    /* This must be cleared only when AFTER_DRAW is true. */
511    sctx->prefetch_L2_mask = 0;
512 }
513 
514 /**
515  * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
516  * LS.LDS_SIZE is shared by all 3 shader stages.
517  *
518  * The information about LDS and other non-compile-time parameters is then
519  * written to userdata SGPRs.
520  */
static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_patches)
522 {
523    struct si_shader *ls_current;
524    struct si_shader_selector *ls;
525    /* The TES pointer will only be used for sctx->last_tcs.
526     * It would be wrong to think that TCS = TES. */
527    struct si_shader_selector *tcs =
528       sctx->shader.tcs.cso ? sctx->shader.tcs.cso : sctx->shader.tes.cso;
529    unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
530    bool has_primid_instancing_bug = sctx->chip_class == GFX6 && sctx->screen->info.max_se == 1;
531    unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
532    uint8_t num_tcs_input_cp = sctx->patch_vertices;
533 
534    /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
535    if (sctx->chip_class >= GFX9) {
536       if (sctx->shader.tcs.cso)
537          ls_current = sctx->shader.tcs.current;
538       else
539          ls_current = sctx->fixed_func_tcs_shader.current;
540 
541       ls = ls_current->key.ge.part.tcs.ls;
542    } else {
543       ls_current = sctx->shader.vs.current;
544       ls = sctx->shader.vs.cso;
545    }
546 
547    if (sctx->last_ls == ls_current && sctx->last_tcs == tcs &&
548        sctx->last_tes_sh_base == tes_sh_base && sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
549        (!has_primid_instancing_bug || (sctx->last_tess_uses_primid == tess_uses_primid))) {
550       *num_patches = sctx->last_num_patches;
551       return;
552    }
553 
554    sctx->last_ls = ls_current;
555    sctx->last_tcs = tcs;
556    sctx->last_tes_sh_base = tes_sh_base;
557    sctx->last_num_tcs_input_cp = num_tcs_input_cp;
558    sctx->last_tess_uses_primid = tess_uses_primid;
559 
560    /* This calculates how shader inputs and outputs among VS, TCS, and TES
561     * are laid out in LDS. */
562    unsigned num_tcs_inputs = util_last_bit64(ls->outputs_written);
563    unsigned num_tcs_output_cp, num_tcs_outputs, num_tcs_patch_outputs;
564 
565    if (sctx->shader.tcs.cso) {
566       num_tcs_outputs = util_last_bit64(tcs->outputs_written);
567       num_tcs_output_cp = tcs->info.base.tess.tcs_vertices_out;
568       num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
569    } else {
570       /* No TCS. Route varyings from LS to TES. */
571       num_tcs_outputs = num_tcs_inputs;
572       num_tcs_output_cp = num_tcs_input_cp;
573       num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
574    }
575 
576    unsigned input_vertex_size = ls->lshs_vertex_stride;
577    unsigned output_vertex_size = num_tcs_outputs * 16;
578    unsigned input_patch_size;
579 
580    /* Allocate LDS for TCS inputs only if it's used. */
581    if (!ls_current->key.ge.opt.same_patch_vertices ||
582        tcs->info.base.inputs_read & ~tcs->tcs_vgpr_only_inputs)
583       input_patch_size = num_tcs_input_cp * input_vertex_size;
584    else
585       input_patch_size = 0;
586 
587    unsigned pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
588    unsigned output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
589    unsigned lds_per_patch;
590 
591    /* Compute the LDS size per patch.
592     *
593     * LDS is used to store TCS outputs if they are read, and to store tess
594     * factors if they are not defined in all invocations.
595     */
596    if (tcs->info.base.outputs_read ||
597        tcs->info.base.patch_outputs_read ||
598        !tcs->info.tessfactors_are_def_in_all_invocs) {
599       lds_per_patch = input_patch_size + output_patch_size;
600    } else {
601       /* LDS will only store TCS inputs. The offchip buffer will only store TCS outputs. */
602       lds_per_patch = MAX2(input_patch_size, output_patch_size);
603    }
604 
605    /* Ensure that we only need 4 waves per CU, so that we don't need to check
606     * resource usage (such as whether we have enough VGPRs to fit the whole
607     * threadgroup into the CU). It also ensures that the number of tcs in and out
608     * vertices per threadgroup are at most 256, which is the hw limit.
609     */
610    unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
611    *num_patches = 256 / max_verts_per_patch;
612 
613    /* Not necessary for correctness, but higher numbers are slower.
614     * The hardware can do more, but the radeonsi shader constant is
615     * limited to 6 bits.
616     */
617    *num_patches = MIN2(*num_patches, 64); /* e.g. 64 triangles in exactly 3 waves */
618 
619    /* When distributed tessellation is unsupported, switch between SEs
620     * at a higher frequency to manually balance the workload between SEs.
621     */
622    if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
623       *num_patches = MIN2(*num_patches, 16); /* recommended */
624 
625    /* Make sure the output data fits in the offchip buffer */
626    *num_patches =
627       MIN2(*num_patches, (sctx->screen->tess_offchip_block_dw_size * 4) / output_patch_size);
628 
629    /* Make sure that the data fits in LDS. This assumes the shaders only
630     * use LDS for the inputs and outputs.
631     *
632     * The maximum allowed LDS size is 32K. Higher numbers can hang.
633     * Use 16K as the maximum, so that we can fit 2 workgroups on the same CU.
634     */
635    ASSERTED unsigned max_lds_size = 32 * 1024; /* hw limit */
636    unsigned target_lds_size = 16 * 1024; /* target at least 2 workgroups per CU, 16K each */
637    *num_patches = MIN2(*num_patches, target_lds_size / lds_per_patch);
638    *num_patches = MAX2(*num_patches, 1);
639    assert(*num_patches * lds_per_patch <= max_lds_size);
640 
641    /* Make sure that vector lanes are fully occupied by cutting off the last wave
642     * if it's only partially filled.
643     */
644    unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
645    unsigned wave_size = ls_current->wave_size;
646 
647    if (temp_verts_per_tg > wave_size &&
648        (wave_size - temp_verts_per_tg % wave_size >= MAX2(max_verts_per_patch, 8)))
649       *num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
650 
651    if (sctx->chip_class == GFX6) {
652       /* GFX6 bug workaround, related to power management. Limit LS-HS
653        * threadgroups to only one wave.
654        */
655       unsigned one_wave = wave_size / max_verts_per_patch;
656       *num_patches = MIN2(*num_patches, one_wave);
657    }
658 
659    /* The VGT HS block increments the patch ID unconditionally
660     * within a single threadgroup. This results in incorrect
661     * patch IDs when instanced draws are used.
662     *
663     * The intended solution is to restrict threadgroups to
664     * a single instance by setting SWITCH_ON_EOI, which
665     * should cause IA to split instances up. However, this
666     * doesn't work correctly on GFX6 when there is no other
667     * SE to switch to.
668     */
669    if (has_primid_instancing_bug && tess_uses_primid)
670       *num_patches = 1;
671 
672    sctx->last_num_patches = *num_patches;
673 
674    unsigned output_patch0_offset = input_patch_size * *num_patches;
675    unsigned perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
676 
677    /* Compute userdata SGPRs. */
678    assert(((input_vertex_size / 4) & ~0xff) == 0);
679    assert(((output_vertex_size / 4) & ~0xff) == 0);
680    assert(((input_patch_size / 4) & ~0x1fff) == 0);
681    assert(((output_patch_size / 4) & ~0x1fff) == 0);
682    assert(((output_patch0_offset / 16) & ~0xffff) == 0);
683    assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
684    assert(num_tcs_input_cp <= 32);
685    assert(num_tcs_output_cp <= 32);
686    assert(*num_patches <= 64);
687    assert(((pervertex_output_patch_size * *num_patches) & ~0x1fffff) == 0);
688 
689    uint64_t ring_va = (unlikely(sctx->ws->cs_is_secure(&sctx->gfx_cs)) ?
690       si_resource(sctx->tess_rings_tmz) : si_resource(sctx->tess_rings))->gpu_address;
691    assert((ring_va & u_bit_consecutive(0, 19)) == 0);
692 
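   /* Pack the layout constants written to userdata SGPRs below. The asserts
    * above guarantee that each field fits in its bit range; tcs_out_layout
    * additionally reuses its high bits for ring_va, whose low 19 bits are
    * known to be zero (see the assert on ring_va).
    */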
693    unsigned tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
694                             S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
695    unsigned tcs_out_layout = (output_patch_size / 4) | (num_tcs_input_cp << 13) | ring_va;
696    unsigned tcs_out_offsets = (output_patch0_offset / 16) | ((perpatch_output_offset / 16) << 16);
697    unsigned offchip_layout =
698       (*num_patches - 1) | ((num_tcs_output_cp - 1) << 6) |
699       ((pervertex_output_patch_size * *num_patches) << 11);
700 
701    /* Compute the LDS size. */
702    unsigned lds_size = lds_per_patch * *num_patches;
703 
704    if (sctx->chip_class >= GFX7) {
705       assert(lds_size <= 65536);
706       lds_size = align(lds_size, 512) / 512;
707    } else {
708       assert(lds_size <= 32768);
709       lds_size = align(lds_size, 256) / 256;
710    }
711 
712    /* Set SI_SGPR_VS_STATE_BITS. */
713    sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE & C_VS_STATE_LS_OUT_VERTEX_SIZE;
714    sctx->current_vs_state |= tcs_in_layout;
715 
716    /* We should be able to support in-shader LDS use with LLVM >= 9
717     * by just adding the lds_sizes together, but it has never
718     * been tested. */
719    assert(ls_current->config.lds_size == 0);
720 
721    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
722    radeon_begin(cs);
723 
724    if (sctx->chip_class >= GFX9) {
725       unsigned hs_rsrc2 = ls_current->config.rsrc2;
726 
727       if (sctx->chip_class >= GFX10)
728          hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
729       else
730          hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
731 
732       radeon_set_sh_reg(R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
733 
734       /* Set userdata SGPRs for merged LS-HS. */
735       radeon_set_sh_reg_seq(
736          R_00B430_SPI_SHADER_USER_DATA_LS_0 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
737       radeon_emit(offchip_layout);
738       radeon_emit(tcs_out_offsets);
739       radeon_emit(tcs_out_layout);
740    } else {
741       unsigned ls_rsrc2 = ls_current->config.rsrc2;
742 
743       si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
744       ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
745 
746       /* Due to a hw bug, RSRC2_LS must be written twice with another
747        * LS register written in between. */
748       if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
749          radeon_set_sh_reg(R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
750       radeon_set_sh_reg_seq(R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
751       radeon_emit(ls_current->config.rsrc1);
752       radeon_emit(ls_rsrc2);
753 
754       /* Set userdata SGPRs for TCS. */
755       radeon_set_sh_reg_seq(
756          R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
757       radeon_emit(offchip_layout);
758       radeon_emit(tcs_out_offsets);
759       radeon_emit(tcs_out_layout);
760       radeon_emit(tcs_in_layout);
761    }
762 
763    /* Set userdata SGPRs for TES. */
764    radeon_set_sh_reg_seq(tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
765    radeon_emit(offchip_layout);
766    radeon_emit(ring_va);
767    radeon_end();
768 
769    unsigned ls_hs_config =
770          S_028B58_NUM_PATCHES(*num_patches) |
771          S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
772          S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
773 
774    if (sctx->last_ls_hs_config != ls_hs_config) {
775       radeon_begin(cs);
776       if (sctx->chip_class >= GFX7) {
777          radeon_set_context_reg_idx(R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
778       } else {
779          radeon_set_context_reg(R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
780       }
781       radeon_end_update_context_roll(sctx);
782       sctx->last_ls_hs_config = ls_hs_config;
783    }
784 }
785 
static unsigned si_num_prims_for_vertices(enum pipe_prim_type prim,
787                                           unsigned count, unsigned vertices_per_patch)
788 {
789    switch (prim) {
790    case PIPE_PRIM_PATCHES:
791       return count / vertices_per_patch;
792    case PIPE_PRIM_POLYGON:
793       /* It's a triangle fan with different edge flags. */
794       return count >= 3 ? count - 2 : 0;
795    case SI_PRIM_RECTANGLE_LIST:
796       return count / 3;
797    default:
798       return u_decomposed_prims_for_vertices(prim, count);
799    }
800 }
801 
static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_vgt_param_key *key)
803 {
804    STATIC_ASSERT(sizeof(union si_vgt_param_key) == 2);
805    unsigned max_primgroup_in_wave = 2;
806 
807    /* SWITCH_ON_EOP(0) is always preferable. */
808    bool wd_switch_on_eop = false;
809    bool ia_switch_on_eop = false;
810    bool ia_switch_on_eoi = false;
811    bool partial_vs_wave = false;
812    bool partial_es_wave = false;
813 
814    if (key->u.uses_tess) {
815       /* SWITCH_ON_EOI must be set if PrimID is used. */
816       if (key->u.tess_uses_prim_id)
817          ia_switch_on_eoi = true;
818 
819       /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
820       if ((sscreen->info.family == CHIP_TAHITI || sscreen->info.family == CHIP_PITCAIRN ||
821            sscreen->info.family == CHIP_BONAIRE) &&
822           key->u.uses_gs)
823          partial_vs_wave = true;
824 
825       /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
826       if (sscreen->info.has_distributed_tess) {
827          if (key->u.uses_gs) {
828             if (sscreen->info.chip_class == GFX8)
829                partial_es_wave = true;
830          } else {
831             partial_vs_wave = true;
832          }
833       }
834    }
835 
836    /* This is a hardware requirement. */
837    if (key->u.line_stipple_enabled || (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
838       ia_switch_on_eop = true;
839       wd_switch_on_eop = true;
840    }
841 
842    if (sscreen->info.chip_class >= GFX7) {
843       /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
844        * 4 shader engines. Set 1 to pass the assertion below.
845        * The other cases are hardware requirements.
846        *
847        * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
848        * for points, line strips, and tri strips.
849        */
850       if (sscreen->info.max_se <= 2 || key->u.prim == PIPE_PRIM_POLYGON ||
851           key->u.prim == PIPE_PRIM_LINE_LOOP || key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
852           key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
853           (key->u.primitive_restart &&
854            (sscreen->info.family < CHIP_POLARIS10 ||
855             (key->u.prim != PIPE_PRIM_POINTS && key->u.prim != PIPE_PRIM_LINE_STRIP &&
856              key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
857           key->u.count_from_stream_output)
858          wd_switch_on_eop = true;
859 
860       /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
861        * We don't know that for indirect drawing, so treat it as
862        * always problematic. */
863       if (sscreen->info.family == CHIP_HAWAII && key->u.uses_instancing)
864          wd_switch_on_eop = true;
865 
866       /* Performance recommendation for 4 SE Gfx7-8 parts if
867        * instances are smaller than a primgroup.
868        * Assume indirect draws always use small instances.
869        * This is needed for good VS wave utilization.
870        */
871       if (sscreen->info.chip_class <= GFX8 && sscreen->info.max_se == 4 &&
872           key->u.multi_instances_smaller_than_primgroup)
873          wd_switch_on_eop = true;
874 
875       /* Required on GFX7 and later. */
876       if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
877          ia_switch_on_eoi = true;
878 
879       /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
880        * to work around a GS hang.
881        */
882       if (key->u.uses_gs &&
883           (sscreen->info.family == CHIP_TONGA || sscreen->info.family == CHIP_FIJI ||
884            sscreen->info.family == CHIP_POLARIS10 || sscreen->info.family == CHIP_POLARIS11 ||
885            sscreen->info.family == CHIP_POLARIS12 || sscreen->info.family == CHIP_VEGAM))
886          partial_vs_wave = true;
887 
888       /* Required by Hawaii and, for some special cases, by GFX8. */
889       if (ia_switch_on_eoi &&
890           (sscreen->info.family == CHIP_HAWAII ||
891            (sscreen->info.chip_class == GFX8 && (key->u.uses_gs || max_primgroup_in_wave != 2))))
892          partial_vs_wave = true;
893 
894       /* Instancing bug on Bonaire. */
895       if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi && key->u.uses_instancing)
896          partial_vs_wave = true;
897 
898       /* This only applies to Polaris10 and later 4 SE chips.
899        * wd_switch_on_eop is already true on all other chips.
900        */
901       if (!wd_switch_on_eop && key->u.primitive_restart)
902          partial_vs_wave = true;
903 
904       /* If the WD switch is false, the IA switch must be false too. */
905       assert(wd_switch_on_eop || !ia_switch_on_eop);
906    }
907 
908    /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
909    if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
910       partial_es_wave = true;
911 
912    return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
913           S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
914           S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
915           S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
916           /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
917           S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ? max_primgroup_in_wave
918                                                                         : 0) |
919           S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
920           S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
921 }
922 
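/* Precompute IA_MULTI_VGT_PARAM for every combination of the si_vgt_param_key
 * bits, so that draw calls only have to index this table and OR in
 * PRIMGROUP_SIZE (see si_get_ia_multi_vgt_param).
 */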
static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
924 {
925    for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
926       for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
927          for (int multi_instances = 0; multi_instances < 2; multi_instances++)
928             for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
929                for (int count_from_so = 0; count_from_so < 2; count_from_so++)
930                   for (int line_stipple = 0; line_stipple < 2; line_stipple++)
931                      for (int uses_tess = 0; uses_tess < 2; uses_tess++)
932                         for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
933                            for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
934                               union si_vgt_param_key key;
935 
936                               key.index = 0;
937                               key.u.prim = prim;
938                               key.u.uses_instancing = uses_instancing;
939                               key.u.multi_instances_smaller_than_primgroup = multi_instances;
940                               key.u.primitive_restart = primitive_restart;
941                               key.u.count_from_stream_output = count_from_so;
942                               key.u.line_stipple_enabled = line_stipple;
943                               key.u.uses_tess = uses_tess;
944                               key.u.tess_uses_prim_id = tess_uses_primid;
945                               key.u.uses_gs = uses_gs;
946 
947                               sctx->ia_multi_vgt_param[key.index] =
948                                  si_get_init_multi_vgt_param(sctx->screen, &key);
949                            }
950 }
951 
static bool si_is_line_stipple_enabled(struct si_context *sctx)
953 {
954    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
955 
956    return rs->line_stipple_enable && sctx->current_rast_prim != PIPE_PRIM_POINTS &&
957           (rs->polygon_mode_is_lines || util_prim_is_lines(sctx->current_rast_prim));
958 }
959 
960 enum si_is_draw_vertex_state {
961    DRAW_VERTEX_STATE_OFF,
962    DRAW_VERTEX_STATE_ON,
963 };
964 
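/* Return whether this is an instanced draw whose instances each contain fewer
 * than num_prims primitives. Indirect draws with an indirect buffer are
 * treated as if their instances were always small, since the counts are not
 * known on the CPU.
 */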
965 template <si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static bool num_instanced_prims_less_than(const struct pipe_draw_indirect_info *indirect,
967                                           enum pipe_prim_type prim,
968                                           unsigned min_vertex_count,
969                                           unsigned instance_count,
970                                           unsigned num_prims,
971                                           ubyte vertices_per_patch)
972 {
973    if (IS_DRAW_VERTEX_STATE)
974       return 0;
975 
976    if (indirect) {
977       return indirect->buffer ||
978              (instance_count > 1 && indirect->count_from_stream_output);
979    } else {
980       return instance_count > 1 &&
981              si_num_prims_for_vertices(prim, min_vertex_count, vertices_per_patch) < num_prims;
982    }
983 }
984 
985 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
986           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
988                                           const struct pipe_draw_indirect_info *indirect,
989                                           enum pipe_prim_type prim, unsigned num_patches,
990                                           unsigned instance_count, bool primitive_restart,
991                                           unsigned min_vertex_count)
992 {
993    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
994    unsigned primgroup_size;
995    unsigned ia_multi_vgt_param;
996 
997    if (HAS_TESS) {
998       primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
999    } else if (HAS_GS) {
1000       primgroup_size = 64; /* recommended with a GS */
1001    } else {
1002       primgroup_size = 128; /* recommended without a GS and tess */
1003    }
1004 
1005    key.u.prim = prim;
1006    key.u.uses_instancing = !IS_DRAW_VERTEX_STATE &&
1007                            ((indirect && indirect->buffer) || instance_count > 1);
1008    key.u.multi_instances_smaller_than_primgroup =
1009       num_instanced_prims_less_than<IS_DRAW_VERTEX_STATE>(indirect, prim, min_vertex_count,
1010                                                           instance_count, primgroup_size,
1011                                                           sctx->patch_vertices);
1012    key.u.primitive_restart = !IS_DRAW_VERTEX_STATE && primitive_restart;
1013    key.u.count_from_stream_output = !IS_DRAW_VERTEX_STATE && indirect &&
1014                                     indirect->count_from_stream_output;
1015    key.u.line_stipple_enabled = si_is_line_stipple_enabled(sctx);
1016 
1017    ia_multi_vgt_param =
1018       sctx->ia_multi_vgt_param[key.index] | S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
1019 
1020    if (HAS_GS) {
1021       /* GS requirement. */
1022       if (GFX_VERSION <= GFX8 &&
1023           SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
1024          ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
1025 
1026       /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
1027        * The hw doc says all multi-SE chips are affected, but Vulkan
1028        * only applies it to Hawaii. Do what Vulkan does.
1029        */
1030       if (GFX_VERSION == GFX7 &&
1031           sctx->family == CHIP_HAWAII && G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
1032           num_instanced_prims_less_than<IS_DRAW_VERTEX_STATE>(indirect, prim, min_vertex_count,
1033                                                               instance_count, 2, sctx->patch_vertices))
1034          sctx->flags |= SI_CONTEXT_VGT_FLUSH;
1035    }
1036 
1037    return ia_multi_vgt_param;
1038 }
1039 
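/* Map a gallium primitive type to the primitive class (points, line strips,
 * triangle strips, or rects) that reaches the rasterizer, as written to
 * VGT_GS_OUT_PRIM_TYPE.
 */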
1040 ALWAYS_INLINE
static unsigned si_conv_prim_to_gs_out(unsigned mode)
1042 {
1043    static const int prim_conv[] = {
1044       [PIPE_PRIM_POINTS] = V_028A6C_POINTLIST,
1045       [PIPE_PRIM_LINES] = V_028A6C_LINESTRIP,
1046       [PIPE_PRIM_LINE_LOOP] = V_028A6C_LINESTRIP,
1047       [PIPE_PRIM_LINE_STRIP] = V_028A6C_LINESTRIP,
1048       [PIPE_PRIM_TRIANGLES] = V_028A6C_TRISTRIP,
1049       [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_TRISTRIP,
1050       [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_TRISTRIP,
1051       [PIPE_PRIM_QUADS] = V_028A6C_TRISTRIP,
1052       [PIPE_PRIM_QUAD_STRIP] = V_028A6C_TRISTRIP,
1053       [PIPE_PRIM_POLYGON] = V_028A6C_TRISTRIP,
1054       [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_LINESTRIP,
1055       [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_LINESTRIP,
1056       [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_TRISTRIP,
1057       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_TRISTRIP,
1058       [PIPE_PRIM_PATCHES] = V_028A6C_POINTLIST,
1059       [SI_PRIM_RECTANGLE_LIST] = V_028A6C_RECTLIST,
1060    };
1061    assert(mode < ARRAY_SIZE(prim_conv));
1062 
1063    return prim_conv[mode];
1064 }
1065 
1066 /* rast_prim is the primitive type after GS. */
1067 template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
1069 {
1070    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1071    enum pipe_prim_type rast_prim = sctx->current_rast_prim;
1072    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1073 
1074    radeon_begin(cs);
1075 
1076    if (unlikely(si_is_line_stipple_enabled(sctx))) {
1077       /* For lines, reset the stipple pattern at each primitive. Otherwise,
1078        * reset the stipple pattern at each packet (line strips, line loops).
1079        */
1080       bool reset_per_prim = rast_prim == PIPE_PRIM_LINES ||
1081                             rast_prim == PIPE_PRIM_LINES_ADJACENCY;
1082       /* 0 = no reset, 1 = reset per prim, 2 = reset per packet */
1083       unsigned value =
1084          rs->pa_sc_line_stipple | S_028A0C_AUTO_RESET_CNTL(reset_per_prim ? 1 : 2);
1085 
1086       radeon_opt_set_context_reg(sctx, R_028A0C_PA_SC_LINE_STIPPLE, SI_TRACKED_PA_SC_LINE_STIPPLE,
1087                                  value);
1088    }
1089 
1090    unsigned gs_out_prim = si_conv_prim_to_gs_out(rast_prim);
1091    if (unlikely(gs_out_prim != sctx->last_gs_out_prim && (NGG || HAS_GS))) {
1092       radeon_set_context_reg(R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
1093       sctx->last_gs_out_prim = gs_out_prim;
1094    }
1095 
1096    if (GFX_VERSION == GFX9)
1097       radeon_end_update_context_roll(sctx);
1098    else
1099       radeon_end();
1100 
1101    if (NGG) {
1102       struct si_shader *hw_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current;
1103 
1104       if (hw_vs->uses_vs_state_provoking_vertex) {
1105          unsigned vtx_index = rs->flatshade_first ? 0 : gs_out_prim;
1106 
1107          sctx->current_vs_state &= C_VS_STATE_PROVOKING_VTX_INDEX;
1108          sctx->current_vs_state |= S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
1109       }
1110 
1111       if (hw_vs->uses_vs_state_outprim) {
1112          sctx->current_vs_state &= C_VS_STATE_OUTPRIM;
1113          sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out_prim);
1114       }
1115    }
1116 }
1117 
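/* Emit the SI_SGPR_VS_STATE_BITS user SGPR for all stages that read it,
 * skipping the emit when the value hasn't changed since the last draw.
 */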
1118 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1119           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_vs_state(struct si_context *sctx, unsigned index_size)
1121 {
1122    if (!IS_DRAW_VERTEX_STATE && sctx->num_vs_blit_sgprs) {
1123       /* Re-emit the state after we leave u_blitter. */
1124       sctx->last_vs_state = ~0;
1125       return;
1126    }
1127 
1128    if (sctx->shader.vs.cso->info.uses_base_vertex) {
1129       sctx->current_vs_state &= C_VS_STATE_INDEXED;
1130       sctx->current_vs_state |= S_VS_STATE_INDEXED(!!index_size);
1131    }
1132 
1133    if (sctx->current_vs_state != sctx->last_vs_state) {
1134       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1135 
1136       /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
1137       unsigned vs_base = si_get_user_data_base(GFX_VERSION, HAS_TESS, HAS_GS, NGG,
1138                                                PIPE_SHADER_VERTEX);
1139       radeon_begin(cs);
1140       radeon_set_sh_reg(vs_base + SI_SGPR_VS_STATE_BITS * 4,
1141                         sctx->current_vs_state);
1142 
1143       /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
1144        * before the rasterizer.
1145        *
1146        * For TES or the GS copy shader without NGG:
1147        */
1148       if (vs_base != R_00B130_SPI_SHADER_USER_DATA_VS_0) {
1149          radeon_set_sh_reg(R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_VS_STATE_BITS * 4,
1150                            sctx->current_vs_state);
1151       }
1152 
1153       /* For NGG: */
1154       if (GFX_VERSION >= GFX10 && vs_base != R_00B230_SPI_SHADER_USER_DATA_GS_0) {
1155          radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + SI_SGPR_VS_STATE_BITS * 4,
1156                            sctx->current_vs_state);
1157       }
1158       radeon_end();
1159 
1160       sctx->last_vs_state = sctx->current_vs_state;
1161    }
1162 }
1163 
1164 ALWAYS_INLINE
static bool si_prim_restart_index_changed(struct si_context *sctx, bool primitive_restart,
1166                                           unsigned restart_index)
1167 {
1168    return primitive_restart && (restart_index != sctx->last_restart_index ||
1169                                 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
1170 }
1171 
1172 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
1173           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
1175                                        const struct pipe_draw_indirect_info *indirect,
1176                                        enum pipe_prim_type prim, unsigned num_patches,
1177                                        unsigned instance_count, bool primitive_restart,
1178                                        unsigned min_vertex_count)
1179 {
1180    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1181    unsigned ia_multi_vgt_param;
1182 
1183    ia_multi_vgt_param =
1184       si_get_ia_multi_vgt_param<GFX_VERSION, HAS_TESS, HAS_GS, IS_DRAW_VERTEX_STATE>
1185          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
1186           min_vertex_count);
1187 
1188    /* Draw state. */
1189    if (ia_multi_vgt_param != sctx->last_multi_vgt_param ||
1190        /* Workaround for SpecviewPerf13 Catia hang on GFX9. */
1191        (GFX_VERSION == GFX9 && prim != sctx->last_prim)) {
1192       radeon_begin(cs);
1193 
1194       if (GFX_VERSION == GFX9)
1195          radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION,
1196                                     R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
1197       else if (GFX_VERSION >= GFX7)
1198          radeon_set_context_reg_idx(R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
1199       else
1200          radeon_set_context_reg(R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
1201 
1202       radeon_end();
1203 
1204       sctx->last_multi_vgt_param = ia_multi_vgt_param;
1205    }
1206 }
1207 
1208 /* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
1209  * We overload last_multi_vgt_param.
1210  */
1211 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
1213 {
1214    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
1215    unsigned ge_cntl;
1216 
1217    if (NGG) {
1218       if (HAS_TESS) {
1219          ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
1220                    S_03096C_VERT_GRP_SIZE(0) |
1221                    S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
1222       } else {
1223          ge_cntl = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->ge_cntl;
1224       }
1225    } else {
1226       unsigned primgroup_size;
1227       unsigned vertgroup_size;
1228 
1229       if (HAS_TESS) {
1230          primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
1231          vertgroup_size = 0;
1232       } else if (HAS_GS) {
1233          unsigned vgt_gs_onchip_cntl = sctx->shader.gs.current->ctx_reg.gs.vgt_gs_onchip_cntl;
1234          primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
1235          vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
1236       } else {
1237          primgroup_size = 128; /* recommended without a GS and tess */
1238          vertgroup_size = 0;
1239       }
1240 
1241       ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) | S_03096C_VERT_GRP_SIZE(vertgroup_size) |
1242                 S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
1243    }
1244 
1245    ge_cntl |= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx));
1246 
1247    if (ge_cntl != sctx->last_multi_vgt_param) {
1248       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1249 
1250       radeon_begin(cs);
1251       radeon_set_uconfig_reg(R_03096C_GE_CNTL, ge_cntl);
1252       radeon_end();
1253       sctx->last_multi_vgt_param = ge_cntl;
1254    }
1255 }
1256 
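/* Emit the draw-related VGT registers (GE_CNTL or IA_MULTI_VGT_PARAM,
 * VGT_PRIMITIVE_TYPE, primitive restart enable/index), using the last_*
 * shadows in si_context to skip redundant writes.
 */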
1257 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1258           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_draw_registers(struct si_context *sctx,
1260                                    const struct pipe_draw_indirect_info *indirect,
1261                                    enum pipe_prim_type prim, unsigned num_patches,
1262                                    unsigned instance_count, bool primitive_restart,
1263                                    unsigned restart_index, unsigned min_vertex_count)
1264 {
1265    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1266 
1267    if (IS_DRAW_VERTEX_STATE)
1268       primitive_restart = false;
1269 
1270    if (GFX_VERSION >= GFX10)
1271       gfx10_emit_ge_cntl<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx, num_patches);
1272    else
1273       si_emit_ia_multi_vgt_param<GFX_VERSION, HAS_TESS, HAS_GS, IS_DRAW_VERTEX_STATE>
1274          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
1275           min_vertex_count);
1276 
1277    radeon_begin(cs);
1278 
1279    if (prim != sctx->last_prim) {
1280       unsigned vgt_prim = si_conv_pipe_prim(prim);
1281 
1282       if (GFX_VERSION >= GFX10)
1283          radeon_set_uconfig_reg(R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
1284       else if (GFX_VERSION >= GFX7)
1285          radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION, R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
1286       else
1287          radeon_set_config_reg(R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
1288 
1289       sctx->last_prim = prim;
1290    }
1291 
1292    /* Primitive restart. */
1293    if (primitive_restart != sctx->last_primitive_restart_en) {
1294       if (GFX_VERSION >= GFX9)
1295          radeon_set_uconfig_reg(R_03092C_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
1296       else
1297          radeon_set_context_reg(R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
1298 
1299       sctx->last_primitive_restart_en = primitive_restart;
1300    }
1301    if (si_prim_restart_index_changed(sctx, primitive_restart, restart_index)) {
1302       radeon_set_context_reg(R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, restart_index);
1303       sctx->last_restart_index = restart_index;
1304       if (GFX_VERSION == GFX9)
1305          sctx->context_roll = true;
1306    }
1307    radeon_end();
1308 }
1309 
1310 #define EMIT_SQTT_END_DRAW do {                                          \
1311       if (GFX_VERSION >= GFX9 && unlikely(sctx->thread_trace_enabled)) { \
1312          radeon_begin(&sctx->gfx_cs);                                    \
1313          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));       \
1314          radeon_emit(EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) |          \
1315                      EVENT_INDEX(0));                                    \
1316          radeon_end();                                      \
1317       }                                                                  \
1318    } while (0)
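/* EMIT_SQTT_END_DRAW is expanded at the end of si_emit_draw_packets below. When thread
 * tracing (SQTT) is enabled on GFX9+, it writes a THREAD_TRACE_MARKER event, presumably so
 * the captured trace can delimit where each draw's packet sequence ends.
 */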
1319 
1320 template <chip_class GFX_VERSION, si_has_ngg NGG, si_is_draw_vertex_state IS_DRAW_VERTEX_STATE>
1321 ALWAYS_INLINE
1322 static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw_info *info,
1323                                  unsigned drawid_base,
1324                                  const struct pipe_draw_indirect_info *indirect,
1325                                  const struct pipe_draw_start_count_bias *draws,
1326                                  unsigned num_draws,
1327                                  struct pipe_resource *indexbuf, unsigned index_size,
1328                                  unsigned index_offset, unsigned instance_count)
1329 {
1330    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1331 
1332    if (unlikely(sctx->thread_trace_enabled)) {
1333       si_sqtt_write_event_marker(sctx, &sctx->gfx_cs, sctx->sqtt_next_event,
1334                                  UINT_MAX, UINT_MAX, UINT_MAX);
1335    }
1336 
1337    uint32_t use_opaque = 0;
1338 
1339    if (!IS_DRAW_VERTEX_STATE && indirect && indirect->count_from_stream_output) {
1340       struct si_streamout_target *t = (struct si_streamout_target *)indirect->count_from_stream_output;
1341 
1342       radeon_begin(cs);
1343       radeon_set_context_reg(R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
1344       radeon_end();
1345 
1346       si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_REG, NULL,
1347                       R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2, COPY_DATA_SRC_MEM,
1348                       t->buf_filled_size, t->buf_filled_size_offset);
1349       use_opaque = S_0287F0_USE_OPAQUE(1);
1350       indirect = NULL;
1351    }
1352 
1353    uint32_t index_max_size = 0;
1354    uint64_t index_va = 0;
1355 
1356    radeon_begin(cs);
1357 
1358    /* draw packet */
1359    if (index_size) {
1360       /* Register shadowing doesn't shadow INDEX_TYPE. */
1361       if (index_size != sctx->last_index_size || sctx->shadowed_regs) {
1362          unsigned index_type;
1363 
1364          /* Index type computation. When we look at how we need to translate index_size,
1365           * we can see that we just need 2 shifts to get the hw value.
1366           *
1367           * 1 = 001b --> 10b = 2
1368           * 2 = 010b --> 00b = 0
1369           * 4 = 100b --> 01b = 1
1370           */
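         /* For example, index_size == 4 gives ((4 >> 2) | (4 << 1)) & 0x3 = (1 | 8) & 0x3 = 1,
          * matching the table above; index_size == 1 gives (0 | 2) & 0x3 = 2.
          */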
1371          index_type = ((index_size >> 2) | (index_size << 1)) & 0x3;
1372 
1373          if (GFX_VERSION <= GFX7 && SI_BIG_ENDIAN) {
1374             /* GFX7 doesn't support ubyte indices. */
1375             index_type |= index_size == 2 ? V_028A7C_VGT_DMA_SWAP_16_BIT
1376                                           : V_028A7C_VGT_DMA_SWAP_32_BIT;
1377          }
1378 
1379          if (GFX_VERSION >= GFX9) {
1380             radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION,
1381                                        R_03090C_VGT_INDEX_TYPE, 2, index_type);
1382          } else {
1383             radeon_emit(PKT3(PKT3_INDEX_TYPE, 0, 0));
1384             radeon_emit(index_type);
1385          }
1386 
1387          sctx->last_index_size = index_size;
1388       }
1389 
1390       index_max_size = (indexbuf->width0 - index_offset) >> util_logbase2(index_size);
1391       /* Skip draw calls with 0-sized index buffers.
1392        * They cause a hang on some chips, like Navi10-14.
1393        */
1394       if (!index_max_size) {
1395          radeon_end();
1396          return;
1397       }
1398 
1399       index_va = si_resource(indexbuf)->gpu_address + index_offset;
1400 
1401       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(indexbuf),
1402                                 RADEON_USAGE_READ | RADEON_PRIO_INDEX_BUFFER);
1403    } else {
1404       /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
1405        * so the state must be re-emitted before the next indexed draw.
1406        */
1407       if (GFX_VERSION >= GFX7)
1408          sctx->last_index_size = -1;
1409    }
1410 
1411    unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
1412    bool render_cond_bit = sctx->render_cond_enabled;
1413 
1414    if (!IS_DRAW_VERTEX_STATE && indirect) {
1415       assert(num_draws == 1);
1416       uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
1417 
1418       assert(indirect_va % 8 == 0);
1419 
1420       si_invalidate_draw_constants(sctx);
1421 
1422       radeon_emit(PKT3(PKT3_SET_BASE, 2, 0));
1423       radeon_emit(1);
1424       radeon_emit(indirect_va);
1425       radeon_emit(indirect_va >> 32);
1426 
1427       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(indirect->buffer),
1428                                 RADEON_USAGE_READ | RADEON_PRIO_DRAW_INDIRECT);
1429 
1430       unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
1431 
1432       assert(indirect->offset % 4 == 0);
1433 
1434       if (index_size) {
1435          radeon_emit(PKT3(PKT3_INDEX_BASE, 1, 0));
1436          radeon_emit(index_va);
1437          radeon_emit(index_va >> 32);
1438 
1439          radeon_emit(PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
1440          radeon_emit(index_max_size);
1441       }
1442 
1443       if (!sctx->screen->has_draw_indirect_multi) {
1444          radeon_emit(PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT : PKT3_DRAW_INDIRECT, 3,
1445                           render_cond_bit));
1446          radeon_emit(indirect->offset);
1447          radeon_emit((sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
1448          radeon_emit((sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
1449          radeon_emit(di_src_sel);
1450       } else {
1451          uint64_t count_va = 0;
1452 
1453          if (indirect->indirect_draw_count) {
1454             struct si_resource *params_buf = si_resource(indirect->indirect_draw_count);
1455 
1456             radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, params_buf,
1457                                       RADEON_USAGE_READ | RADEON_PRIO_DRAW_INDIRECT);
1458 
1459             count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
1460          }
1461 
1462          radeon_emit(PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI : PKT3_DRAW_INDIRECT_MULTI, 8,
1463                           render_cond_bit));
1464          radeon_emit(indirect->offset);
1465          radeon_emit((sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
1466          radeon_emit((sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
1467          radeon_emit(((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
1468                      S_2C3_DRAW_INDEX_ENABLE(sctx->shader.vs.cso->info.uses_drawid) |
1469                      S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
1470          radeon_emit(indirect->draw_count);
1471          radeon_emit(count_va);
1472          radeon_emit(count_va >> 32);
1473          radeon_emit(indirect->stride);
1474          radeon_emit(di_src_sel);
1475       }
1476    } else {
1477       /* Register shadowing requires that we always emit PKT3_NUM_INSTANCES. */
1478       if (sctx->shadowed_regs ||
1479           sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
1480           sctx->last_instance_count != instance_count) {
1481          radeon_emit(PKT3(PKT3_NUM_INSTANCES, 0, 0));
1482          radeon_emit(instance_count);
1483          sctx->last_instance_count = instance_count;
1484       }
1485 
1486       /* Base vertex and start instance. */
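      /* The multi-dword radeon_set_sh_reg_seq writes below assume that BASE_VERTEX, DRAWID
       * and START_INSTANCE occupy consecutive user-data SGPRs. This is an observation about
       * how the code is structured, not documentation of the user-data layout.
       */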
1487       int base_vertex = index_size ? draws[0].index_bias : draws[0].start;
1488 
1489       bool set_draw_id = !IS_DRAW_VERTEX_STATE && sctx->vs_uses_draw_id;
1490       bool set_base_instance = sctx->vs_uses_base_instance;
1491 
1492       if (!IS_DRAW_VERTEX_STATE && sctx->num_vs_blit_sgprs) {
1493          /* Re-emit draw constants after we leave u_blitter. */
1494          si_invalidate_draw_sh_constants(sctx);
1495 
1496          /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
1497          radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4, sctx->num_vs_blit_sgprs);
1498          radeon_emit_array(sctx->vs_blit_sh_data, sctx->num_vs_blit_sgprs);
1499       } else if (base_vertex != sctx->last_base_vertex ||
1500                  sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
1501                  (set_base_instance &&
1502                   (info->start_instance != sctx->last_start_instance ||
1503                    sctx->last_start_instance == SI_START_INSTANCE_UNKNOWN)) ||
1504                  (set_draw_id &&
1505                   (drawid_base != sctx->last_drawid ||
1506                    sctx->last_drawid == SI_DRAW_ID_UNKNOWN)) ||
1507                  sh_base_reg != sctx->last_sh_base_reg) {
1508          if (set_base_instance) {
1509             radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
1510             radeon_emit(base_vertex);
1511             radeon_emit(drawid_base);
1512             radeon_emit(info->start_instance);
1513 
1514             sctx->last_start_instance = info->start_instance;
1515             sctx->last_drawid = drawid_base;
1516          } else if (set_draw_id) {
1517             radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1518             radeon_emit(base_vertex);
1519             radeon_emit(drawid_base);
1520 
1521             sctx->last_drawid = drawid_base;
1522          } else {
1523             radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, base_vertex);
1524          }
1525 
1526          sctx->last_base_vertex = base_vertex;
1527          sctx->last_sh_base_reg = sh_base_reg;
1528       }
1529 
1530       /* Don't update draw_id in the following code if it doesn't increment. */
1531       bool increment_draw_id = !IS_DRAW_VERTEX_STATE && num_draws > 1 &&
1532                                set_draw_id && info->increment_draw_id;
1533 
1534       if (index_size) {
1535          /* NOT_EOP allows merging multiple draws into 1 wave, but only user VGPRs
1536           * can be changed between draws, and GS fast launch must be disabled.
1537           * NOT_EOP doesn't work on GFX9 and older.
1538           *
1539           * Instead of doing this, which evaluates the case conditions repeatedly:
1540           *  for (all draws) {
1541           *    if (case1);
1542           *    else;
1543           *  }
1544           *
1545           * Use this structuring to evaluate the case conditions once:
1546           *  if (case1) for (all draws);
1547           *  else for (all draws);
1548           *
1549           */
1550          bool index_bias_varies = !IS_DRAW_VERTEX_STATE && num_draws > 1 &&
1551                                   info->index_bias_varies;
1552 
1553          if (increment_draw_id) {
1554             if (index_bias_varies) {
1555                for (unsigned i = 0; i < num_draws; i++) {
1556                   uint64_t va = index_va + draws[i].start * index_size;
1557 
1558                   if (i > 0) {
1559                      radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1560                      radeon_emit(draws[i].index_bias);
1561                      radeon_emit(drawid_base + i);
1562                   }
1563 
1564                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1565                   radeon_emit(index_max_size);
1566                   radeon_emit(va);
1567                   radeon_emit(va >> 32);
1568                   radeon_emit(draws[i].count);
1569                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1570                }
1571                if (num_draws > 1) {
1572                   sctx->last_base_vertex = draws[num_draws - 1].index_bias;
1573                   sctx->last_drawid = drawid_base + num_draws - 1;
1574                }
1575             } else {
1576                /* Only DrawID varies. */
1577                for (unsigned i = 0; i < num_draws; i++) {
1578                   uint64_t va = index_va + draws[i].start * index_size;
1579 
1580                   if (i > 0)
1581                      radeon_set_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4, drawid_base + i);
1582 
1583                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1584                   radeon_emit(index_max_size);
1585                   radeon_emit(va);
1586                   radeon_emit(va >> 32);
1587                   radeon_emit(draws[i].count);
1588                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1589                }
1590                if (num_draws > 1)
1591                   sctx->last_drawid = drawid_base + num_draws - 1;
1592             }
1593          } else {
1594             if (index_bias_varies) {
1595                /* Only BaseVertex varies. */
1596                for (unsigned i = 0; i < num_draws; i++) {
1597                   uint64_t va = index_va + draws[i].start * index_size;
1598 
1599                   if (i > 0)
1600                      radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].index_bias);
1601 
1602                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1603                   radeon_emit(index_max_size);
1604                   radeon_emit(va);
1605                   radeon_emit(va >> 32);
1606                   radeon_emit(draws[i].count);
1607                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1608                }
1609                if (num_draws > 1)
1610                   sctx->last_base_vertex = draws[num_draws - 1].index_bias;
1611             } else {
1612                /* DrawID and BaseVertex are constant. */
1613                if (GFX_VERSION == GFX10) {
1614                   /* GFX10 has a bug that consecutive draw packets with NOT_EOP must not have
1615                    * count == 0 in the last draw (which doesn't set NOT_EOP).
1616                    *
1617                    * So remove all trailing draws with count == 0.
1618                    */
1619                   while (num_draws > 1 && !draws[num_draws - 1].count)
1620                      num_draws--;
1621                }
1622 
1623                for (unsigned i = 0; i < num_draws; i++) {
1624                   uint64_t va = index_va + draws[i].start * index_size;
1625 
1626                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1627                   radeon_emit(index_max_size);
1628                   radeon_emit(va);
1629                   radeon_emit(va >> 32);
1630                   radeon_emit(draws[i].count);
1631                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA |
1632                               S_0287F0_NOT_EOP(GFX_VERSION >= GFX10 && i < num_draws - 1));
1633                }
1634             }
1635          }
1636       } else {
1637          for (unsigned i = 0; i < num_draws; i++) {
1638             if (i > 0) {
1639                if (increment_draw_id) {
1640                   unsigned draw_id = drawid_base + i;
1641 
1642                   radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1643                   radeon_emit(draws[i].start);
1644                   radeon_emit(draw_id);
1645 
1646                   sctx->last_drawid = draw_id;
1647                } else {
1648                   radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].start);
1649                }
1650             }
1651 
1652             radeon_emit(PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
1653             radeon_emit(draws[i].count);
1654             radeon_emit(V_0287F0_DI_SRC_SEL_AUTO_INDEX | use_opaque);
1655          }
1656          if (num_draws > 1 && (IS_DRAW_VERTEX_STATE || !sctx->num_vs_blit_sgprs))
1657             sctx->last_base_vertex = draws[num_draws - 1].start;
1658       }
1659    }
1660    radeon_end();
1661 
1662    EMIT_SQTT_END_DRAW;
1663 }
1664 
1665 /* Return false if not bound. */
1666 template<chip_class GFX_VERSION>
1667 static bool ALWAYS_INLINE si_set_vb_descriptor(struct si_vertex_elements *velems,
1668                                                struct pipe_vertex_buffer *vb,
1669                                                unsigned index, /* vertex element index */
1670                                                uint32_t *desc) /* where to upload descriptors */
1671 {
1672    struct si_resource *buf = si_resource(vb->buffer.resource);
1673    if (!buf) {
1674       memset(desc, 0, 16);
1675       return false;
1676    }
1677 
1678    int64_t offset = (int64_t)((int)vb->buffer_offset) + velems->src_offset[index];
1679 
1680    if (offset >= buf->b.b.width0) {
1681       assert(offset < buf->b.b.width0);
1682       memset(desc, 0, 16);
1683       return false;
1684    }
1685 
1686    uint64_t va = buf->gpu_address + offset;
1687 
1688    int64_t num_records = (int64_t)buf->b.b.width0 - offset;
1689    if (GFX_VERSION != GFX8 && vb->stride) {
1690       /* Round up by rounding down and adding 1 */
1691       num_records = (num_records - velems->format_size[index]) / vb->stride + 1;
1692    }
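   /* Worked example with illustrative numbers: a 100-byte buffer at offset 0 with stride 16
    * and a 12-byte format gives (100 - 12) / 16 + 1 = 6 records; the 6th vertex reads bytes
    * 80..91, which still fit, while a 7th (bytes 96..107) would not.
    */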
1693    assert(num_records >= 0 && num_records <= UINT_MAX);
1694 
1695    uint32_t rsrc_word3 = velems->rsrc_word3[index];
1696 
1697    /* OOB_SELECT chooses the out-of-bounds check:
1698     *  - 1: index >= NUM_RECORDS (Structured)
1699     *  - 3: offset >= NUM_RECORDS (Raw)
1700     */
1701    if (GFX_VERSION >= GFX10)
1702       rsrc_word3 |= S_008F0C_OOB_SELECT(vb->stride ? V_008F0C_OOB_SELECT_STRUCTURED
1703                                                    : V_008F0C_OOB_SELECT_RAW);
1704 
1705    desc[0] = va;
1706    desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(vb->stride);
1707    desc[2] = num_records;
1708    desc[3] = rsrc_word3;
1709    return true;
1710 }
1711 
1712 #if GFX_VER == 6 /* declare this function only once because it supports all chips. */
1713 
1714 void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex_elements *velems,
1715                                      struct pipe_vertex_buffer *vb, unsigned element_index,
1716                                      uint32_t *out)
1717 {
1718    switch (sscreen->info.chip_class) {
1719    case GFX6:
1720       si_set_vb_descriptor<GFX6>(velems, vb, element_index, out);
1721       break;
1722    case GFX7:
1723       si_set_vb_descriptor<GFX7>(velems, vb, element_index, out);
1724       break;
1725    case GFX8:
1726       si_set_vb_descriptor<GFX8>(velems, vb, element_index, out);
1727       break;
1728    case GFX9:
1729       si_set_vb_descriptor<GFX9>(velems, vb, element_index, out);
1730       break;
1731    case GFX10:
1732       si_set_vb_descriptor<GFX10>(velems, vb, element_index, out);
1733       break;
1734    case GFX10_3:
1735       si_set_vb_descriptor<GFX10_3>(velems, vb, element_index, out);
1736       break;
1737    default:
1738       unreachable("unhandled chip class");
1739    }
1740 }
1741 
1742 #endif
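/* Hypothetical call site for the dispatcher above (a sketch only, not taken from this file):
 * the caller provides storage for one 4-dword buffer descriptor:
 *
 *    uint32_t desc[4];
 *    si_set_vertex_buffer_descriptor(sscreen, velems, &vb, 0, desc);
 */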
1743 
1744 template<util_popcnt POPCNT>
1745 static ALWAYS_INLINE unsigned get_next_vertex_state_elem(struct pipe_vertex_state *state,
1746                                                          uint32_t *partial_velem_mask)
1747 {
1748    unsigned semantic_index = u_bit_scan(partial_velem_mask);
1749    assert(state->input.full_velem_mask & BITFIELD_BIT(semantic_index));
1750    /* A prefix mask of the full mask gives us the index in pipe_vertex_state. */
1751    return util_bitcount_fast<POPCNT>(state->input.full_velem_mask & BITFIELD_MASK(semantic_index));
1752 }
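/* Example of the prefix-mask trick above (illustrative numbers): with full_velem_mask = 0b1101
 * and the scanned semantic_index = 3, BITFIELD_MASK(3) = 0b0111 selects bits 0 and 2, so the
 * popcount returns 2, i.e. the element is the third one stored in pipe_vertex_state.
 */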
1753 
1754 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1755           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, util_popcnt POPCNT> ALWAYS_INLINE
1756 static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx,
1757                                                   struct pipe_vertex_state *state,
1758                                                   uint32_t partial_velem_mask)
1759 {
1760    struct si_vertex_state *vstate = (struct si_vertex_state *)state;
1761    unsigned count = IS_DRAW_VERTEX_STATE ? util_bitcount_fast<POPCNT>(partial_velem_mask) :
1762                                            sctx->num_vertex_elements;
1763    unsigned sh_base = si_get_user_data_base(GFX_VERSION, HAS_TESS, HAS_GS, NGG,
1764                                             PIPE_SHADER_VERTEX);
1765    unsigned num_vbos_in_user_sgprs = si_num_vbos_in_user_sgprs_inline(GFX_VERSION);
1766    bool pointer_dirty, user_sgprs_dirty;
1767 
1768    assert(count <= SI_MAX_ATTRIBS);
1769 
1770    if (sctx->vertex_buffers_dirty || IS_DRAW_VERTEX_STATE) {
1771       assert(count);
1772 
1773       struct si_vertex_elements *velems = sctx->vertex_elements;
1774       unsigned alloc_size = IS_DRAW_VERTEX_STATE ?
1775                                vstate->velems.vb_desc_list_alloc_size :
1776                                velems->vb_desc_list_alloc_size;
1777       uint32_t *ptr;
1778 
1779       if (alloc_size) {
1780          /* Vertex buffer descriptors are the only ones which are uploaded directly
1781           * and don't go through si_upload_graphics_shader_descriptors.
1782           */
1783          u_upload_alloc(sctx->b.const_uploader, 0, alloc_size,
1784                         si_optimal_tcc_alignment(sctx, alloc_size), &sctx->vb_descriptors_offset,
1785                         (struct pipe_resource **)&sctx->vb_descriptors_buffer, (void **)&ptr);
1786          if (!sctx->vb_descriptors_buffer) {
1787             sctx->vb_descriptors_offset = 0;
1788             sctx->vb_descriptors_gpu_list = NULL;
1789             return false;
1790          }
1791 
1792          sctx->vb_descriptors_gpu_list = ptr;
1793          radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->vb_descriptors_buffer,
1794                                    RADEON_USAGE_READ | RADEON_PRIO_DESCRIPTORS);
1795          /* GFX6 doesn't support the L2 prefetch. */
1796          if (GFX_VERSION >= GFX7)
1797             si_cp_dma_prefetch(sctx, &sctx->vb_descriptors_buffer->b.b, sctx->vb_descriptors_offset,
1798                                alloc_size);
1799       } else {
1800          si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
1801       }
1802 
1803       if (IS_DRAW_VERTEX_STATE) {
1804          unsigned i = 0;
1805 
1806          if (num_vbos_in_user_sgprs) {
1807             unsigned num_vb_sgprs = MIN2(count, num_vbos_in_user_sgprs) * 4;
1808 
1809             radeon_begin(&sctx->gfx_cs);
1810             radeon_set_sh_reg_seq(sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_vb_sgprs);
1811 
1812             for (; partial_velem_mask && i < num_vbos_in_user_sgprs; i++) {
1813                unsigned velem_index = get_next_vertex_state_elem<POPCNT>(state, &partial_velem_mask);
1814 
1815                radeon_emit_array(&vstate->descriptors[velem_index * 4], 4);
1816             }
1817             radeon_end();
1818          }
1819 
1820          for (; partial_velem_mask; i++) {
1821             unsigned velem_index = get_next_vertex_state_elem<POPCNT>(state, &partial_velem_mask);
1822             uint32_t *desc = &ptr[(i - num_vbos_in_user_sgprs) * 4];
1823 
1824             memcpy(desc, &vstate->descriptors[velem_index * 4], 16);
1825          }
1826 
1827          if (vstate->b.input.vbuffer.buffer.resource != vstate->b.input.indexbuf) {
1828             radeon_add_to_buffer_list(sctx, &sctx->gfx_cs,
1829                                       si_resource(vstate->b.input.vbuffer.buffer.resource),
1830                                       RADEON_USAGE_READ | RADEON_PRIO_VERTEX_BUFFER);
1831          }
1832 
1833          /* The next draw_vbo should recompute and rebind vertex buffer descriptors. */
1834          sctx->vertex_buffers_dirty = sctx->num_vertex_elements > 0;
1835 
1836          user_sgprs_dirty = false; /* We just set them above. */
1837          pointer_dirty = count > num_vbos_in_user_sgprs;
1838       } else {
1839          unsigned first_vb_use_mask = velems->first_vb_use_mask;
1840 
1841          for (unsigned i = 0; i < count; i++) {
1842             unsigned vbo_index = velems->vertex_buffer_index[i];
1843             struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbo_index];
1844             uint32_t *desc = i < num_vbos_in_user_sgprs ? &sctx->vb_descriptor_user_sgprs[i * 4]
1845                                                         : &ptr[(i - num_vbos_in_user_sgprs) * 4];
1846 
1847             if (!si_set_vb_descriptor<GFX_VERSION>(velems, vb, i, desc))
1848                continue;
1849 
1850             if (first_vb_use_mask & (1 << i)) {
1851                radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(vb->buffer.resource),
1852                                          RADEON_USAGE_READ | RADEON_PRIO_VERTEX_BUFFER);
1853             }
1854          }
1855 
1856          sctx->vertex_buffers_dirty = false;
1857          user_sgprs_dirty = num_vbos_in_user_sgprs > 0;
1858          pointer_dirty = alloc_size != 0;
1859       }
1860    } else {
1861       pointer_dirty = sctx->vertex_buffer_pointer_dirty;
1862       user_sgprs_dirty = sctx->vertex_buffer_user_sgprs_dirty;
1863    }
1864 
1865    if (pointer_dirty || user_sgprs_dirty) {
1866       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1867       assert(count);
1868 
1869       radeon_begin(cs);
1870 
1871       /* Set the pointer to vertex buffer descriptors. */
1872       if (pointer_dirty && count > num_vbos_in_user_sgprs) {
1873          /* Find the location of the VB descriptor pointer. */
1874          unsigned sh_dw_offset = SI_VS_NUM_USER_SGPR;
1875          if (GFX_VERSION >= GFX9) {
1876             if (HAS_TESS)
1877                sh_dw_offset = GFX9_TCS_NUM_USER_SGPR;
1878             else if (HAS_GS || NGG)
1879                sh_dw_offset = GFX9_GS_NUM_USER_SGPR;
1880          }
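         /* On GFX9+ the API VS runs merged into the HW LS-HS (tess) or ES-GS (GS/NGG) stage,
          * so the descriptor-list pointer is placed after that merged stage's user SGPRs.
          */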
1881 
1882          radeon_set_sh_reg(sh_base + sh_dw_offset * 4,
1883                            sctx->vb_descriptors_buffer->gpu_address +
1884                            sctx->vb_descriptors_offset);
1885          sctx->vertex_buffer_pointer_dirty = false;
1886       }
1887 
1888       /* Set VB descriptors in user SGPRs. */
1889       if (user_sgprs_dirty) {
1890          assert(num_vbos_in_user_sgprs);
1891 
1892          unsigned num_sgprs = MIN2(count, num_vbos_in_user_sgprs) * 4;
1893 
1894          radeon_set_sh_reg_seq(sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_sgprs);
1895          radeon_emit_array(sctx->vb_descriptor_user_sgprs, num_sgprs);
1896          sctx->vertex_buffer_user_sgprs_dirty = false;
1897       }
1898       radeon_end();
1899    }
1900 
1901    return true;
1902 }
1903 
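/* Compute the smallest [start, start + count) vertex range that covers all draws, reading
 * indirect parameters back on the CPU when necessary. For example (illustrative numbers),
 * direct draws {start 10, count 5} and {start 2, count 3} yield *start = 2 and *count = 13.
 */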
1904 static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_draw_info *info,
1905                                     const struct pipe_draw_indirect_info *indirect,
1906                                     const struct pipe_draw_start_count_bias *draws,
1907                                     unsigned num_draws, unsigned *start, unsigned *count)
1908 {
1909    if (indirect && !indirect->count_from_stream_output) {
1910       unsigned indirect_count;
1911       struct pipe_transfer *transfer;
1912       unsigned begin, end;
1913       unsigned map_size;
1914       unsigned *data;
1915 
1916       if (indirect->indirect_draw_count) {
1917          data = (unsigned*)
1918                 pipe_buffer_map_range(&sctx->b, indirect->indirect_draw_count,
1919                                       indirect->indirect_draw_count_offset, sizeof(unsigned),
1920                                       PIPE_MAP_READ, &transfer);
1921 
1922          indirect_count = *data;
1923 
1924          pipe_buffer_unmap(&sctx->b, transfer);
1925       } else {
1926          indirect_count = indirect->draw_count;
1927       }
1928 
1929       if (!indirect_count) {
1930          *start = *count = 0;
1931          return;
1932       }
1933 
1934       map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
1935       data = (unsigned*)
1936              pipe_buffer_map_range(&sctx->b, indirect->buffer, indirect->offset, map_size,
1937                                    PIPE_MAP_READ, &transfer);
1938 
1939       begin = UINT_MAX;
1940       end = 0;
1941 
1942       for (unsigned i = 0; i < indirect_count; ++i) {
1943          unsigned count = data[0];
1944          unsigned start = data[2];
1945 
1946          if (count > 0) {
1947             begin = MIN2(begin, start);
1948             end = MAX2(end, start + count);
1949          }
1950 
1951          data += indirect->stride / sizeof(unsigned);
1952       }
1953 
1954       pipe_buffer_unmap(&sctx->b, transfer);
1955 
1956       if (begin < end) {
1957          *start = begin;
1958          *count = end - begin;
1959       } else {
1960          *start = *count = 0;
1961       }
1962    } else {
1963       unsigned min_element = UINT_MAX;
1964       unsigned max_element = 0;
1965 
1966       for (unsigned i = 0; i < num_draws; i++) {
1967          min_element = MIN2(min_element, draws[i].start);
1968          max_element = MAX2(max_element, draws[i].start + draws[i].count);
1969       }
1970 
1971       *start = min_element;
1972       *count = max_element - min_element;
1973    }
1974 }
1975 
1976 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1977           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
1978 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1979                                const struct pipe_draw_indirect_info *indirect,
1980                                enum pipe_prim_type prim, unsigned instance_count,
1981                                unsigned min_vertex_count, bool primitive_restart,
1982                                unsigned skip_atom_mask)
1983 {
1984    unsigned num_patches = 0;
1985 
1986    si_emit_rasterizer_prim_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx);
1987    if (HAS_TESS)
1988       si_emit_derived_tess_state(sctx, &num_patches);
1989 
1990    /* Emit state atoms. */
1991    unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1992    if (mask) {
1993       do {
1994          sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
1995       } while (mask);
1996 
1997       sctx->dirty_atoms &= skip_atom_mask;
1998    }
1999 
2000    /* Emit states. */
2001    mask = sctx->dirty_states;
2002    if (mask) {
2003       do {
2004          unsigned i = u_bit_scan(&mask);
2005          struct si_pm4_state *state = sctx->queued.array[i];
2006 
2007          /* All places should unset dirty_states if this doesn't pass. */
2008          assert(state && state != sctx->emitted.array[i]);
2009 
2010          si_pm4_emit(sctx, state);
2011          sctx->emitted.array[i] = state;
2012       } while (mask);
2013 
2014       sctx->dirty_states = 0;
2015    }
2016 
2017    /* Emit draw states. */
2018    si_emit_vs_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>(sctx, info->index_size);
2019    si_emit_draw_registers<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2020          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
2021           info->restart_index, min_vertex_count);
2022 }
2023 
2024 #define DRAW_CLEANUP do {                                 \
2025       if (index_size && indexbuf != info->index.resource) \
2026          pipe_resource_reference(&indexbuf, NULL);        \
2027    } while (0)
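/* DRAW_CLEANUP drops the temporary index buffer reference that si_draw creates when it
 * translates 8-bit indices or uploads user indices (see the index_size handling below);
 * if indexbuf still points at info->index.resource, nothing is released.
 */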
2028 
2029 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
2030           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, util_popcnt POPCNT> ALWAYS_INLINE
2031 static void si_draw(struct pipe_context *ctx,
2032                     const struct pipe_draw_info *info,
2033                     unsigned drawid_offset,
2034                     const struct pipe_draw_indirect_info *indirect,
2035                     const struct pipe_draw_start_count_bias *draws,
2036                     unsigned num_draws,
2037                     struct pipe_vertex_state *state,
2038                     uint32_t partial_velem_mask)
2039 {
2040    /* Keep code that uses the least number of local variables as close to the beginning
2041     * of this function as possible to minimize register pressure.
2042     *
2043     * It doesn't matter where we return due to invalid parameters because such cases
2044     * shouldn't occur in practice.
2045     */
2046    struct si_context *sctx = (struct si_context *)ctx;
2047 
2048    /* Recompute and re-emit the texture resource states if needed. */
2049    unsigned dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
2050    if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
2051       sctx->last_dirty_tex_counter = dirty_tex_counter;
2052       sctx->framebuffer.dirty_cbufs |= ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
2053       sctx->framebuffer.dirty_zsbuf = true;
2054       si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
2055       si_update_all_texture_descriptors(sctx);
2056    }
2057 
2058    unsigned dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
2059    if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
2060       sctx->last_dirty_buf_counter = dirty_buf_counter;
2061       /* Rebind all buffers unconditionally. */
2062       si_rebind_buffer(sctx, NULL);
2063    }
2064 
2065    si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
2066    si_need_gfx_cs_space(sctx, num_draws);
2067 
2068    if (HAS_TESS) {
2069       struct si_shader_selector *tcs = sctx->shader.tcs.cso;
2070 
2071       /* The rarely occurring tcs == NULL case is not optimized. */
2072       bool same_patch_vertices =
2073          GFX_VERSION >= GFX9 &&
2074          tcs && sctx->patch_vertices == tcs->info.base.tess.tcs_vertices_out;
2075 
2076       if (sctx->shader.tcs.key.ge.opt.same_patch_vertices != same_patch_vertices) {
2077          sctx->shader.tcs.key.ge.opt.same_patch_vertices = same_patch_vertices;
2078          sctx->do_update_shaders = true;
2079       }
2080 
2081       if (GFX_VERSION == GFX9 && sctx->screen->info.has_ls_vgpr_init_bug) {
2082          /* Determine whether the LS VGPR fix should be applied.
2083           *
2084           * It is only required when num input CPs > num output CPs,
2085           * which cannot happen with the fixed function TCS. We should
2086           * also update this bit when switching from TCS to fixed
2087           * function TCS.
2088           */
2089          bool ls_vgpr_fix =
2090             tcs && sctx->patch_vertices > tcs->info.base.tess.tcs_vertices_out;
2091 
2092          if (ls_vgpr_fix != sctx->shader.tcs.key.ge.part.tcs.ls_prolog.ls_vgpr_fix) {
2093             sctx->shader.tcs.key.ge.part.tcs.ls_prolog.ls_vgpr_fix = ls_vgpr_fix;
2094             sctx->fixed_func_tcs_shader.key.ge.part.tcs.ls_prolog.ls_vgpr_fix = ls_vgpr_fix;
2095             sctx->do_update_shaders = true;
2096          }
2097       }
2098    }
2099 
2100    enum pipe_prim_type prim = (enum pipe_prim_type)info->mode;
2101    unsigned instance_count = info->instance_count;
2102 
2103    /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
2104     * no workaround for indirect draws, but we can at least skip
2105     * direct draws.
2106     * 'instance_count == 0' seems to be problematic on Renoir chips (#4866),
2107     * so simplify the condition and drop these draws for all <= GFX9 chips.
2108     */
2109    if (GFX_VERSION <= GFX9 && unlikely(!IS_DRAW_VERTEX_STATE && !indirect && !instance_count))
2110       return;
2111 
2112    struct si_shader_selector *vs = sctx->shader.vs.cso;
2113    struct si_vertex_state *vstate = (struct si_vertex_state *)state;
2114    if (unlikely(!vs ||
2115                 (!IS_DRAW_VERTEX_STATE && sctx->num_vertex_elements < vs->num_vs_inputs) ||
2116                 (IS_DRAW_VERTEX_STATE && vstate->velems.count < vs->num_vs_inputs) ||
2117                 !sctx->shader.ps.cso || (HAS_TESS != (prim == PIPE_PRIM_PATCHES)))) {
2118       assert(0);
2119       return;
2120    }
2121 
2122    if (GFX_VERSION <= GFX9 && HAS_GS) {
2123       /* Determine whether the GS triangle strip adjacency fix should
2124        * be applied. Rotate every other triangle if triangle strips with
2125        * adjacency are fed to the GS. This doesn't work if primitive
2126        * restart occurs after an odd number of triangles.
2127        */
2128       bool gs_tri_strip_adj_fix =
2129          !HAS_TESS && prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
2130 
2131       if (gs_tri_strip_adj_fix != sctx->shader.gs.key.ge.mono.u.gs_tri_strip_adj_fix) {
2132          sctx->shader.gs.key.ge.mono.u.gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
2133          sctx->do_update_shaders = true;
2134       }
2135    }
2136 
2137    struct pipe_resource *indexbuf = info->index.resource;
2138    unsigned index_size = info->index_size;
2139    unsigned index_offset = indirect && indirect->buffer ? draws[0].start * index_size : 0;
2140 
2141    if (index_size) {
2142       /* Translate or upload, if needed. */
2143       /* 8-bit indices are supported on GFX8. */
2144       if (!IS_DRAW_VERTEX_STATE && GFX_VERSION <= GFX7 && index_size == 1) {
2145          unsigned start, count, start_offset, size, offset;
2146          void *ptr;
2147 
2148          si_get_draw_start_count(sctx, info, indirect, draws, num_draws, &start, &count);
2149          start_offset = start * 2;
2150          size = count * 2;
2151 
2152          indexbuf = NULL;
2153          u_upload_alloc(ctx->stream_uploader, start_offset, size,
2154                         si_optimal_tcc_alignment(sctx, size), &offset, &indexbuf, &ptr);
2155          if (unlikely(!indexbuf))
2156             return;
2157 
2158          util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0, index_offset + start, count, ptr);
2159 
2160          /* info->start will be added by the drawing code */
2161          index_offset = offset - start_offset;
2162          index_size = 2;
2163       } else if (!IS_DRAW_VERTEX_STATE && info->has_user_indices) {
2164          unsigned start_offset;
2165 
2166          assert(!indirect);
2167          assert(num_draws == 1);
2168          start_offset = draws[0].start * index_size;
2169 
2170          indexbuf = NULL;
2171          u_upload_data(ctx->stream_uploader, start_offset, draws[0].count * index_size,
2172                        sctx->screen->info.tcc_cache_line_size,
2173                        (char *)info->index.user + start_offset, &index_offset, &indexbuf);
2174          if (unlikely(!indexbuf))
2175             return;
2176 
2177          /* info->start will be added by the drawing code */
2178          index_offset -= start_offset;
2179       } else if (GFX_VERSION <= GFX7 && si_resource(indexbuf)->TC_L2_dirty) {
2180          /* GFX8 reads index buffers through TC L2, so it doesn't
2181           * need this. */
2182          sctx->flags |= SI_CONTEXT_WB_L2;
2183          si_resource(indexbuf)->TC_L2_dirty = false;
2184       }
2185    }
2186 
2187    unsigned min_direct_count = 0;
2188    unsigned total_direct_count = 0;
2189 
2190    if (!IS_DRAW_VERTEX_STATE && indirect) {
2191       /* Add the buffer size for memory checking in need_cs_space. */
2192       if (indirect->buffer)
2193          si_context_add_resource_size(sctx, indirect->buffer);
2194 
2195       /* Indirect buffers use TC L2 on GFX9, but not older hw. */
2196       if (GFX_VERSION <= GFX8) {
2197          if (indirect->buffer && si_resource(indirect->buffer)->TC_L2_dirty) {
2198             sctx->flags |= SI_CONTEXT_WB_L2;
2199             si_resource(indirect->buffer)->TC_L2_dirty = false;
2200          }
2201 
2202          if (indirect->indirect_draw_count &&
2203              si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
2204             sctx->flags |= SI_CONTEXT_WB_L2;
2205             si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
2206          }
2207       }
2208       total_direct_count = INT_MAX; /* just set something other than 0 to enable shader culling */
2209    } else {
2210       total_direct_count = min_direct_count = draws[0].count;
2211 
2212       for (unsigned i = 1; i < num_draws; i++) {
2213          unsigned count = draws[i].count;
2214 
2215          total_direct_count += count;
2216          min_direct_count = MIN2(min_direct_count, count);
2217       }
2218    }
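   /* total_direct_count is compared against ngg_cull_vert_threshold further below to decide
    * whether NGG culling is worth enabling; indirect draws use INT_MAX so that culling is not
    * turned off merely because the vertex count is unknown.
    */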
2219 
2220    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2221    bool primitive_restart =
2222       info->primitive_restart &&
2223       (!sctx->screen->options.prim_restart_tri_strips_only ||
2224        (prim != PIPE_PRIM_TRIANGLE_STRIP && prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
2225 
2226    /* Set the rasterization primitive type.
2227     *
2228     * This must be done after si_decompress_textures, which can call
2229     * draw_vbo recursively, and before si_update_shaders, which uses
2230     * current_rast_prim for this draw_vbo call.
2231     */
2232    if (!HAS_GS && !HAS_TESS) {
2233       enum pipe_prim_type rast_prim;
2234 
2235       if (util_rast_prim_is_triangles(prim)) {
2236          rast_prim = PIPE_PRIM_TRIANGLES;
2237       } else {
2238          /* The only other possibilities are POINTS, LINE*, and RECTANGLES. */
2239          rast_prim = prim;
2240       }
2241 
2242       if (rast_prim != sctx->current_rast_prim) {
2243          if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
2244              util_prim_is_points_or_lines(rast_prim))
2245             si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
2246 
2247          sctx->current_rast_prim = rast_prim;
2248          sctx->do_update_shaders = true;
2249       }
2250    }
2251 
2252    if (IS_DRAW_VERTEX_STATE) {
2253       /* draw_vertex_state doesn't use the current vertex buffers and vertex elements,
2254        * so disable any non-trivial VS prolog that is based on them, such as vertex
2255        * format lowering.
2256        */
2257       if (!sctx->force_trivial_vs_prolog) {
2258          sctx->force_trivial_vs_prolog = true;
2259 
2260          /* Update shaders to disable the non-trivial VS prolog. */
2261          if (sctx->uses_nontrivial_vs_prolog) {
2262             si_vs_key_update_inputs(sctx);
2263             sctx->do_update_shaders = true;
2264          }
2265       }
2266    } else {
2267       if (sctx->force_trivial_vs_prolog) {
2268          sctx->force_trivial_vs_prolog = false;
2269 
2270          /* Update shaders to enable the non-trivial VS prolog. */
2271          if (sctx->uses_nontrivial_vs_prolog) {
2272             si_vs_key_update_inputs(sctx);
2273             sctx->do_update_shaders = true;
2274          }
2275       }
2276    }
2277 
2278    /* Update NGG culling settings. */
2279    uint16_t old_ngg_culling = sctx->ngg_culling;
2280    if (GFX_VERSION >= GFX10) {
2281       struct si_shader_selector *hw_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->cso;
2282 
2283       if (NGG &&
2284           /* Tessellation and GS set ngg_cull_vert_threshold to UINT_MAX if the prim type
2285            * is not points, so this check is only needed for VS. */
2286           (HAS_TESS || HAS_GS || util_rast_prim_is_lines_or_triangles(sctx->current_rast_prim)) &&
2287           /* Only the first draw for a shader starts with culling disabled and it's disabled
2288            * until we pass the total_direct_count check and then it stays enabled until
2289            * the shader is changed. This eliminates most culling on/off state changes. */
2290           (old_ngg_culling || total_direct_count > hw_vs->ngg_cull_vert_threshold)) {
2291          /* Check that the current shader allows culling. */
2292          assert(hw_vs->ngg_cull_vert_threshold != UINT_MAX);
2293 
2294          uint16_t ngg_culling;
2295 
2296          if (util_prim_is_lines(sctx->current_rast_prim)) {
2297             /* Overwrite it to mask out face cull flags. */
2298             ngg_culling = rs->ngg_cull_flags_lines;
2299          } else {
2300             ngg_culling = sctx->viewport0_y_inverted ? rs->ngg_cull_flags_tris_y_inverted :
2301                                                        rs->ngg_cull_flags_tris;
2302             assert(ngg_culling); /* rasterizer state should always set this to non-zero */
2303          }
2304 
2305          if (ngg_culling != old_ngg_culling) {
2306             /* If shader compilation is not ready, this setting will be rejected. */
2307             sctx->ngg_culling = ngg_culling;
2308             sctx->do_update_shaders = true;
2309          }
2310       } else if (old_ngg_culling) {
2311          sctx->ngg_culling = 0;
2312          sctx->do_update_shaders = true;
2313       }
2314    }
2315 
2316    if (unlikely(sctx->do_update_shaders)) {
2317       if (unlikely(!(si_update_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx)))) {
2318          DRAW_CLEANUP;
2319          return;
2320       }
2321 
2322       /* si_update_shaders can clear the ngg_culling in the shader key if the shader compilation
2323        * hasn't finished. Set it to the correct value in si_context.
2324        */
2325       if (GFX_VERSION >= GFX10 && NGG)
2326          sctx->ngg_culling = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->key.ge.opt.ngg_culling;
2327    }
2328 
2329    /* Since we've called si_context_add_resource_size for vertex buffers,
2330     * this must be called after si_need_cs_space, because we must let
2331     * need_cs_space flush before we add buffers to the buffer list.
2332     *
2333     * This must be done after si_update_shaders because si_update_shaders can
2334     * flush the CS when enabling tess and GS rings.
2335     */
2336    if (sctx->bo_list_add_all_gfx_resources)
2337       si_gfx_resources_add_all_to_bo_list(sctx);
2338 
2339    /* Graphics shader descriptors must be uploaded after si_update_shaders because
2340     * it binds tess and GS ring buffers.
2341     */
2342    if (unlikely(!si_upload_graphics_shader_descriptors(sctx))) {
2343       DRAW_CLEANUP;
2344       return;
2345    }
2346 
2347    /* Vega10/Raven scissor bug workaround. When any context register is
2348     * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
2349     * registers must be written too.
2350     */
2351    unsigned masked_atoms = 0;
2352    bool gfx9_scissor_bug = false;
2353 
2354    if (GFX_VERSION == GFX9 && sctx->screen->info.has_gfx9_scissor_bug) {
2355       masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2356       gfx9_scissor_bug = true;
2357 
2358       if ((!IS_DRAW_VERTEX_STATE && indirect && indirect->count_from_stream_output) ||
2359           sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
2360           sctx->dirty_states & si_states_that_always_roll_context())
2361          sctx->context_roll = true;
2362    }
2363 
2364    /* Use optimal packet order based on whether we need to sync the pipeline. */
2365    if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB |
2366                                SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH |
2367                                SI_CONTEXT_VS_PARTIAL_FLUSH | SI_CONTEXT_VGT_FLUSH))) {
2368       /* If we have to wait for idle, set all states first, so that all
2369        * SET packets are processed in parallel with previous draw calls.
2370        * Then draw and prefetch at the end. This ensures that the time
2371        * the CUs are idle is very short.
2372        */
2373       if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
2374          masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
2375 
2376       /* Emit all states except possibly render condition. */
2377       si_emit_all_states<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2378             (sctx, info, indirect, prim, instance_count, min_direct_count,
2379              primitive_restart, masked_atoms);
2380       sctx->emit_cache_flush(sctx, &sctx->gfx_cs);
2381       /* <-- CUs are idle here. */
2382 
2383       /* This uploads VBO descriptors, sets user SGPRs, and executes the L2 prefetch.
2384        * It should be done after cache flushing.
2385        */
2386       if (unlikely((!si_upload_and_prefetch_VB_descriptors
2387                         <GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE, POPCNT>
2388                         (sctx, state, partial_velem_mask)))) {
2389          DRAW_CLEANUP;
2390          return;
2391       }
2392 
2393       if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
2394          sctx->atoms.s.render_cond.emit(sctx);
2395          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
2396       }
2397 
2398       if (GFX_VERSION == GFX9 && gfx9_scissor_bug &&
2399           (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors))) {
2400          sctx->atoms.s.scissors.emit(sctx);
2401          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2402       }
2403       assert(sctx->dirty_atoms == 0);
2404 
2405       si_emit_draw_packets<GFX_VERSION, NGG, IS_DRAW_VERTEX_STATE>
2406             (sctx, info, drawid_offset, indirect, draws, num_draws, indexbuf,
2407              index_size, index_offset, instance_count);
2408       /* <-- CUs are busy here. */
2409 
2410       /* Start prefetches after the draw has been started. Both will run
2411        * in parallel, but starting the draw first is more important.
2412        */
2413       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_ALL>(sctx);
2414    } else {
2415       /* If we don't wait for idle, start prefetches first, then set
2416        * states, and draw at the end.
2417        */
2418       if (sctx->flags)
2419          sctx->emit_cache_flush(sctx, &sctx->gfx_cs);
2420 
2421       /* Only prefetch the API VS and VBO descriptors. */
2422       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_BEFORE_DRAW>(sctx);
2423 
2424       /* This uploads VBO descriptors, sets user SGPRs, and executes the L2 prefetch.
2425        * It should be done after cache flushing and after the VS prefetch.
2426        */
2427       if (unlikely((!si_upload_and_prefetch_VB_descriptors
2428                        <GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE, POPCNT>
2429                        (sctx, state, partial_velem_mask)))) {
2430          DRAW_CLEANUP;
2431          return;
2432       }
2433 
2434       si_emit_all_states<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2435             (sctx, info, indirect, prim, instance_count, min_direct_count,
2436              primitive_restart, masked_atoms);
2437 
2438       if (GFX_VERSION == GFX9 && gfx9_scissor_bug &&
2439           (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors))) {
2440          sctx->atoms.s.scissors.emit(sctx);
2441          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2442       }
2443       assert(sctx->dirty_atoms == 0);
2444 
2445       si_emit_draw_packets<GFX_VERSION, NGG, IS_DRAW_VERTEX_STATE>
2446             (sctx, info, drawid_offset, indirect, draws, num_draws, indexbuf,
2447              index_size, index_offset, instance_count);
2448 
2449       /* Prefetch the remaining shaders after the draw has been
2450        * started. */
2451       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_AFTER_DRAW>(sctx);
2452    }
2453 
2454    /* Clear the context roll flag after the draw call.
2455     * Only used by the gfx9 scissor bug.
2456     */
2457    if (GFX_VERSION == GFX9)
2458       sctx->context_roll = false;
2459 
   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_draw_state(sctx, sctx->log);
   }

   /* Workaround for a VGT hang when streamout is enabled.
    * It must be done after drawing. */
   if (((GFX_VERSION == GFX7 && sctx->family == CHIP_HAWAII) ||
        (GFX_VERSION == GFX8 && (sctx->family == CHIP_TONGA || sctx->family == CHIP_FIJI))) &&
       si_get_strmout_en(sctx)) {
      sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
   }

   if (unlikely(sctx->decompression_enabled)) {
      sctx->num_decompress_calls++;
   } else {
      sctx->num_draw_calls += num_draws;
      if (primitive_restart)
         sctx->num_prim_restart_calls += num_draws;
   }

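   /* Conservatively assume the draw may have written depth, so the bound
    * depth level can no longer be treated as containing only the last
    * clear value; drop it from the "cleared" tracking mask. */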
   if (sctx->framebuffer.state.zsbuf) {
      struct si_texture *zstex = (struct si_texture *)sctx->framebuffer.state.zsbuf->texture;
      zstex->depth_cleared_level_mask &= ~BITFIELD_BIT(sctx->framebuffer.state.zsbuf->u.tex.level);
   }

   DRAW_CLEANUP;
}

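/* Thin draw_vbo entry point: forwards to si_draw with vertex-state drawing
 * disabled. POPCNT_NO is fine here because partial_velem_mask is only used
 * by the vertex-state path. */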
template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static void si_draw_vbo(struct pipe_context *ctx,
                        const struct pipe_draw_info *info,
                        unsigned drawid_offset,
                        const struct pipe_draw_indirect_info *indirect,
                        const struct pipe_draw_start_count_bias *draws,
                        unsigned num_draws)
{
   si_draw<GFX_VERSION, HAS_TESS, HAS_GS, NGG, DRAW_VERTEX_STATE_OFF, POPCNT_NO>
      (ctx, info, drawid_offset, indirect, draws, num_draws, NULL, 0);
}

template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
          util_popcnt POPCNT>
static void si_draw_vertex_state(struct pipe_context *ctx,
                                 struct pipe_vertex_state *vstate,
                                 uint32_t partial_velem_mask,
                                 struct pipe_draw_vertex_state_info info,
                                 const struct pipe_draw_start_count_bias *draws,
                                 unsigned num_draws)
{
   struct si_vertex_state *state = (struct si_vertex_state *)vstate;
   struct pipe_draw_info dinfo = {};

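   /* Synthesize a minimal pipe_draw_info: vertex-state draws are always
    * single-instance and use the 32-bit index buffer stored in the vertex
    * state object. */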
   dinfo.mode = info.mode;
   dinfo.index_size = 4;
   dinfo.instance_count = 1;
   dinfo.index.resource = state->b.input.indexbuf;

   si_draw<GFX_VERSION, HAS_TESS, HAS_GS, NGG, DRAW_VERTEX_STATE_ON, POPCNT>
      (ctx, &dinfo, 0, NULL, draws, num_draws, vstate, partial_velem_mask);

   if (info.take_vertex_state_ownership)
      pipe_vertex_state_reference(&vstate, NULL);
}

static void si_draw_rectangle(struct blitter_context *blitter, void *vertex_elements_cso,
                              blitter_get_vs_func get_vs, int x1, int y1, int x2, int y2,
                              float depth, unsigned num_instances, enum blitter_attrib_type type,
                              const union blitter_attrib *attrib)
{
   struct pipe_context *pipe = util_blitter_get_pipe(blitter);
   struct si_context *sctx = (struct si_context *)pipe;

   /* Pack position coordinates as signed int16. */
   sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) | ((uint32_t)(y1 & 0xffff) << 16);
   sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(y2 & 0xffff) << 16);
   sctx->vs_blit_sh_data[2] = fui(depth);
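   /* For example, (x1, y1) = (-1, 2) packs into sh_data[0] = 0x0002ffff;
    * the blit VS reconstructs the signed 16-bit coordinates from these
    * user SGPRs. */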

   switch (type) {
   case UTIL_BLITTER_ATTRIB_COLOR:
      memcpy(&sctx->vs_blit_sh_data[3], attrib->color, sizeof(float) * 4);
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord, sizeof(attrib->texcoord));
      break;
   case UTIL_BLITTER_ATTRIB_NONE:;
   }

   pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));

   struct pipe_draw_info info = {};
   struct pipe_draw_start_count_bias draw;

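   /* The rectangle is drawn as a single 3-vertex SI_PRIM_RECTANGLE_LIST
    * primitive; the hardware derives the fourth corner of the axis-aligned
    * rect from the first three vertices. */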
   info.mode = SI_PRIM_RECTANGLE_LIST;
   info.instance_count = num_instances;

   draw.start = 0;
   draw.count = 3;

   /* Don't set per-stage shader pointers for VS. */
   sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
   sctx->vertex_buffer_pointer_dirty = false;
   sctx->vertex_buffer_user_sgprs_dirty = false;
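   /* The blit VS reads its inputs from the user SGPRs set above rather than
    * from vertex buffers, so VB descriptors and per-stage VS descriptor
    * pointers don't need to be re-emitted for this draw. */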

   pipe->draw_vbo(pipe, &info, 0, NULL, &draw, 1);
}

template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static void si_init_draw_vbo(struct si_context *sctx)
{
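   /* NGG is only supported on gfx10+, so skip table entries for
    * combinations that can never be selected. */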
   if (NGG && GFX_VERSION < GFX10)
      return;

   sctx->draw_vbo[HAS_TESS][HAS_GS][NGG] =
      si_draw_vbo<GFX_VERSION, HAS_TESS, HAS_GS, NGG>;

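   /* Pick the si_draw_vertex_state variant that can use the CPU's POPCNT
    * instruction (used for counting bits in partial_velem_mask) when the
    * CPU supports it. */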
   if (util_get_cpu_caps()->has_popcnt) {
      sctx->draw_vertex_state[HAS_TESS][HAS_GS][NGG] =
         si_draw_vertex_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, POPCNT_YES>;
   } else {
      sctx->draw_vertex_state[HAS_TESS][HAS_GS][NGG] =
         si_draw_vertex_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, POPCNT_NO>;
   }
}

template <chip_class GFX_VERSION>
static void si_init_draw_vbo_all_pipeline_options(struct si_context *sctx)
{
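   /* Instantiate all 2^3 = 8 tess/GS/NGG combinations for this gfx version. */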
   si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_OFF, NGG_OFF>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_ON,  NGG_OFF>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_OFF, NGG_OFF>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_ON,  NGG_OFF>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_OFF, NGG_ON>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_ON,  NGG_ON>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_OFF, NGG_ON>(sctx);
   si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_ON,  NGG_ON>(sctx);
}

static void si_invalid_draw_vbo(struct pipe_context *pipe,
                                const struct pipe_draw_info *info,
                                unsigned drawid_offset,
                                const struct pipe_draw_indirect_info *indirect,
                                const struct pipe_draw_start_count_bias *draws,
                                unsigned num_draws)
{
   unreachable("vertex shader not bound");
}

static void si_invalid_draw_vertex_state(struct pipe_context *ctx,
                                         struct pipe_vertex_state *vstate,
                                         uint32_t partial_velem_mask,
                                         struct pipe_draw_vertex_state_info info,
                                         const struct pipe_draw_start_count_bias *draws,
                                         unsigned num_draws)
{
   unreachable("vertex shader not bound");
}

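/* Per-chip entry point: GFX() appends the gfx version suffix, so with
 * GFX_VER == 9 this defines si_init_draw_functions_GFX9. This file is
 * compiled once per supported gfx version. */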
extern "C"
void GFX(si_init_draw_functions_)(struct si_context *sctx)
{
   assert(sctx->chip_class == GFX());

   si_init_draw_vbo_all_pipeline_options<GFX()>(sctx);

   /* Bind a fake draw_vbo so that the pointer isn't NULL; a NULL draw_vbo
    * would make upper layers (such as u_threaded_context) skip the
    * initialization of their callbacks.
    */
   sctx->b.draw_vbo = si_invalid_draw_vbo;
   sctx->b.draw_vertex_state = si_invalid_draw_vertex_state;
   sctx->blitter->draw_rectangle = si_draw_rectangle;

   si_init_ia_multi_vgt_param_table(sctx);
}

#if GFX_VER == 6 /* declare this function only once because it supports all chips. */

extern "C"
void si_init_spi_map_functions(struct si_context *sctx)
{
   /* This unrolls the loops in si_emit_spi_map and inlines memcmp and memcpys.
    * It improves performance for viewperf/snx.
    */
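   /* emit_spi_map[i] is the variant specialized for exactly i interpolated
    * PS inputs (i = 0..32). */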
   sctx->emit_spi_map[0] = si_emit_spi_map<0>;
   sctx->emit_spi_map[1] = si_emit_spi_map<1>;
   sctx->emit_spi_map[2] = si_emit_spi_map<2>;
   sctx->emit_spi_map[3] = si_emit_spi_map<3>;
   sctx->emit_spi_map[4] = si_emit_spi_map<4>;
   sctx->emit_spi_map[5] = si_emit_spi_map<5>;
   sctx->emit_spi_map[6] = si_emit_spi_map<6>;
   sctx->emit_spi_map[7] = si_emit_spi_map<7>;
   sctx->emit_spi_map[8] = si_emit_spi_map<8>;
   sctx->emit_spi_map[9] = si_emit_spi_map<9>;
   sctx->emit_spi_map[10] = si_emit_spi_map<10>;
   sctx->emit_spi_map[11] = si_emit_spi_map<11>;
   sctx->emit_spi_map[12] = si_emit_spi_map<12>;
   sctx->emit_spi_map[13] = si_emit_spi_map<13>;
   sctx->emit_spi_map[14] = si_emit_spi_map<14>;
   sctx->emit_spi_map[15] = si_emit_spi_map<15>;
   sctx->emit_spi_map[16] = si_emit_spi_map<16>;
   sctx->emit_spi_map[17] = si_emit_spi_map<17>;
   sctx->emit_spi_map[18] = si_emit_spi_map<18>;
   sctx->emit_spi_map[19] = si_emit_spi_map<19>;
   sctx->emit_spi_map[20] = si_emit_spi_map<20>;
   sctx->emit_spi_map[21] = si_emit_spi_map<21>;
   sctx->emit_spi_map[22] = si_emit_spi_map<22>;
   sctx->emit_spi_map[23] = si_emit_spi_map<23>;
   sctx->emit_spi_map[24] = si_emit_spi_map<24>;
   sctx->emit_spi_map[25] = si_emit_spi_map<25>;
   sctx->emit_spi_map[26] = si_emit_spi_map<26>;
   sctx->emit_spi_map[27] = si_emit_spi_map<27>;
   sctx->emit_spi_map[28] = si_emit_spi_map<28>;
   sctx->emit_spi_map[29] = si_emit_spi_map<29>;
   sctx->emit_spi_map[30] = si_emit_spi_map<30>;
   sctx->emit_spi_map[31] = si_emit_spi_map<31>;
   sctx->emit_spi_map[32] = si_emit_spi_map<32>;
}

#endif