1 /*
2  * Copyright 2012 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * on the rights to use, copy, modify, merge, publish, distribute, sub
9  * license, and/or sell copies of the Software, and to permit persons to whom
10  * the Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #include "ac_exp_param.h"
26 #include "ac_sqtt.h"
27 #include "si_build_pm4.h"
28 #include "util/u_cpu_detect.h"
29 #include "util/u_index_modify.h"
30 #include "util/u_prim.h"
31 #include "util/u_upload_mgr.h"
32 
33 #if (GFX_VER == 6)
34 #define GFX(name) name##GFX6
35 #elif (GFX_VER == 7)
36 #define GFX(name) name##GFX7
37 #elif (GFX_VER == 8)
38 #define GFX(name) name##GFX8
39 #elif (GFX_VER == 9)
40 #define GFX(name) name##GFX9
41 #elif (GFX_VER == 10)
42 #define GFX(name) name##GFX10
43 #elif (GFX_VER == 103)
44 #define GFX(name) name##GFX10_3
45 #else
46 #error "Unknown gfx version"
47 #endif
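/* This file is meant to be compiled once per gfx level (with GFX_VER set by the
 * build system); the GFX() macro gives each instantiation's entry points a
 * version-specific suffix so they can coexist in one binary. */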
48 
49 /* special primitive types */
50 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
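/* RECTLIST isn't a gallium primitive type; radeonsi uses it internally (e.g. for
 * blit/clear rectangles), so it reuses the first index past the gallium enums. */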
51 
52 template<int NUM_INTERP>
static void si_emit_spi_map(struct si_context *sctx)
54 {
55    struct si_shader *ps = sctx->shader.ps.current;
56    struct si_shader_info *psinfo = ps ? &ps->selector->info : NULL;
57    unsigned spi_ps_input_cntl[NUM_INTERP];
58 
59    STATIC_ASSERT(NUM_INTERP >= 0 && NUM_INTERP <= 32);
60 
61    if (!NUM_INTERP)
62       return;
63 
64    struct si_shader *vs = si_get_vs(sctx)->current;
65    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
66 
67    for (unsigned i = 0; i < NUM_INTERP; i++) {
68       union si_input_info input = psinfo->input[i];
69       unsigned ps_input_cntl = vs->info.vs_output_ps_input_cntl[input.semantic];
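      /* OFFSET == 0x20 in SPI_PS_INPUT_CNTL selects the default value (the VS
       * doesn't export this input), so only patch the control word for inputs
       * that are actually exported. */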
70       bool non_default_val = G_028644_OFFSET(ps_input_cntl) != 0x20;
71 
72       if (non_default_val) {
73          if (input.interpolate == INTERP_MODE_FLAT ||
74              (input.interpolate == INTERP_MODE_COLOR && rs->flatshade))
75             ps_input_cntl |= S_028644_FLAT_SHADE(1);
76 
77          if (input.fp16_lo_hi_valid) {
78             ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
79                              S_028644_ATTR0_VALID(1) | /* this must be set if FP16_INTERP_MODE is set */
80                              S_028644_ATTR1_VALID(!!(input.fp16_lo_hi_valid & 0x2));
81          }
82       }
83 
84       if (input.semantic == VARYING_SLOT_PNTC ||
85           (input.semantic >= VARYING_SLOT_TEX0 && input.semantic <= VARYING_SLOT_TEX7 &&
86            rs->sprite_coord_enable & (1 << (input.semantic - VARYING_SLOT_TEX0)))) {
87          /* Overwrite the whole value (except OFFSET) for sprite coordinates. */
88          ps_input_cntl &= ~C_028644_OFFSET;
89          ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
90          if (input.fp16_lo_hi_valid & 0x1) {
91             ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
92                              S_028644_ATTR0_VALID(1);
93          }
94       }
95 
96       spi_ps_input_cntl[i] = ps_input_cntl;
97    }
98 
99    /* R_028644_SPI_PS_INPUT_CNTL_0 */
100    /* Dota 2: Only ~16% of SPI map updates set different values. */
101    /* Talos: Only ~9% of SPI map updates set different values. */
102    radeon_begin(&sctx->gfx_cs);
103    radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0, spi_ps_input_cntl,
104                                sctx->tracked_regs.spi_ps_input_cntl, NUM_INTERP);
105    radeon_end_update_context_roll(sctx);
106 }
107 
108 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static bool si_update_shaders(struct si_context *sctx)
110 {
111    struct pipe_context *ctx = (struct pipe_context *)sctx;
112    struct si_shader *old_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current;
113    unsigned old_pa_cl_vs_out_cntl = old_vs ? old_vs->pa_cl_vs_out_cntl : 0;
114    struct si_shader *old_ps = sctx->shader.ps.current;
115    unsigned old_spi_shader_col_format =
116       old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
117    int r;
118 
119    /* Update TCS and TES. */
120    if (HAS_TESS) {
121       if (!sctx->tess_rings) {
122          si_init_tess_factor_ring(sctx);
123          if (!sctx->tess_rings)
124             return false;
125       }
126 
127       if (sctx->shader.tcs.cso) {
128          r = si_shader_select(ctx, &sctx->shader.tcs);
129          if (r)
130             return false;
131          si_pm4_bind_state(sctx, hs, sctx->shader.tcs.current);
132       } else {
133          if (!sctx->fixed_func_tcs_shader.cso) {
134             sctx->fixed_func_tcs_shader.cso =
135                (struct si_shader_selector*)si_create_fixed_func_tcs(sctx);
136             if (!sctx->fixed_func_tcs_shader.cso)
137                return false;
138 
139             sctx->fixed_func_tcs_shader.key.part.tcs.epilog.invoc0_tess_factors_are_def =
140                sctx->fixed_func_tcs_shader.cso->info.tessfactors_are_def_in_all_invocs;
141          }
142 
143          r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
144          if (r)
145             return false;
146          si_pm4_bind_state(sctx, hs, sctx->fixed_func_tcs_shader.current);
147       }
148 
149       if (!HAS_GS || GFX_VERSION <= GFX8) {
150          r = si_shader_select(ctx, &sctx->shader.tes);
151          if (r)
152             return false;
153 
154          if (HAS_GS) {
155             /* TES as ES */
156             assert(GFX_VERSION <= GFX8);
157             si_pm4_bind_state(sctx, es, sctx->shader.tes.current);
158          } else if (NGG) {
159             si_pm4_bind_state(sctx, gs, sctx->shader.tes.current);
160          } else {
161             si_pm4_bind_state(sctx, vs, sctx->shader.tes.current);
162          }
163       }
164    } else {
165       if (GFX_VERSION <= GFX8) {
166          si_pm4_bind_state(sctx, ls, NULL);
167          sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
168       }
169       si_pm4_bind_state(sctx, hs, NULL);
170       sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
171    }
172 
173    /* Update GS. */
174    if (HAS_GS) {
175       r = si_shader_select(ctx, &sctx->shader.gs);
176       if (r)
177          return false;
178       si_pm4_bind_state(sctx, gs, sctx->shader.gs.current);
179       if (!NGG) {
180          si_pm4_bind_state(sctx, vs, sctx->shader.gs.cso->gs_copy_shader);
181 
182          if (!si_update_gs_ring_buffers(sctx))
183             return false;
184       } else {
185          si_pm4_bind_state(sctx, vs, NULL);
186          sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
187       }
188    } else {
189       if (!NGG) {
190          si_pm4_bind_state(sctx, gs, NULL);
191          sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
192          if (GFX_VERSION <= GFX8) {
193             si_pm4_bind_state(sctx, es, NULL);
194             sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
195          }
196       }
197    }
198 
199    /* Update VS. */
200    if ((!HAS_TESS && !HAS_GS) || GFX_VERSION <= GFX8) {
201       r = si_shader_select(ctx, &sctx->shader.vs);
202       if (r)
203          return false;
204 
205       if (!HAS_TESS && !HAS_GS) {
206          if (NGG) {
207             si_pm4_bind_state(sctx, gs, sctx->shader.vs.current);
208             si_pm4_bind_state(sctx, vs, NULL);
209             sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
210          } else {
211             si_pm4_bind_state(sctx, vs, sctx->shader.vs.current);
212          }
213       } else if (HAS_TESS) {
214          si_pm4_bind_state(sctx, ls, sctx->shader.vs.current);
215       } else {
216          assert(HAS_GS);
217          si_pm4_bind_state(sctx, es, sctx->shader.vs.current);
218       }
219    }
220 
221    if (GFX_VERSION >= GFX9 && HAS_TESS)
222       sctx->vs_uses_base_instance = sctx->queued.named.hs->uses_base_instance;
223    else if (GFX_VERSION >= GFX9 && HAS_GS)
224       sctx->vs_uses_base_instance = sctx->shader.gs.current->uses_base_instance;
225    else
226       sctx->vs_uses_base_instance = sctx->shader.vs.current->uses_base_instance;
227 
228    union si_vgt_stages_key key;
229    key.index = 0;
230 
231    /* Update VGT_SHADER_STAGES_EN. */
232    if (HAS_TESS)
233       key.u.tess = 1;
234    if (HAS_GS)
235       key.u.gs = 1;
236    if (NGG)
237       key.index |= si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->ctx_reg.ngg.vgt_stages.index;
238 
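   /* The VGT_SHADER_STAGES_EN PM4 state is built lazily and cached per stage key. */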
239    struct si_pm4_state **pm4 = &sctx->vgt_shader_config[key.index];
240    if (unlikely(!*pm4))
241       *pm4 = si_build_vgt_shader_config(sctx->screen, key);
242    si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
243 
244    if (old_pa_cl_vs_out_cntl !=
245           si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->pa_cl_vs_out_cntl)
246       si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
247 
248    r = si_shader_select(ctx, &sctx->shader.ps);
249    if (r)
250       return false;
251    si_pm4_bind_state(sctx, ps, sctx->shader.ps.current);
252 
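   /* emit_spi_map[] is indexed by the PS interpolant count and selects the matching
    * si_emit_spi_map instantiation, so re-point the atom whenever the PS or the
    * shader feeding it changes. */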
253    if (si_pm4_state_changed(sctx, ps) ||
254        (!NGG && si_pm4_state_changed(sctx, vs)) ||
255        (NGG && si_pm4_state_changed(sctx, gs))) {
256       sctx->atoms.s.spi_map.emit = sctx->emit_spi_map[sctx->shader.ps.current->ctx_reg.ps.num_interp];
257       si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
258    }
259 
260    if ((GFX_VERSION >= GFX10_3 || (GFX_VERSION >= GFX9 && sctx->screen->info.rbplus_allowed)) &&
261        si_pm4_state_changed(sctx, ps) &&
262        (!old_ps || old_spi_shader_col_format !=
263                       sctx->shader.ps.current->key.part.ps.epilog.spi_shader_col_format))
264       si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
265 
266    if (sctx->smoothing_enabled !=
267        sctx->shader.ps.current->key.part.ps.epilog.poly_line_smoothing) {
268       sctx->smoothing_enabled = sctx->shader.ps.current->key.part.ps.epilog.poly_line_smoothing;
269       si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
270 
271       /* NGG cull state uses smoothing_enabled. */
272       if (GFX_VERSION >= GFX10 && sctx->screen->use_ngg_culling)
273          si_mark_atom_dirty(sctx, &sctx->atoms.s.ngg_cull_state);
274 
275       if (GFX_VERSION == GFX6)
276          si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
277 
278       if (sctx->framebuffer.nr_samples <= 1)
279          si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
280    }
281 
282    if (unlikely(sctx->screen->debug_flags & DBG(SQTT) && sctx->thread_trace)) {
      /* Pretend the bound shaders form a Vulkan pipeline. */
284       uint32_t pipeline_code_hash = 0;
285       uint64_t base_address = ~0;
286 
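      /* Hash all bound shader binaries together and remember the lowest shader VA;
       * this (hash, base address) pair identifies the "pipeline" reported to SQTT/RGP. */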
287       for (int i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
288          struct si_shader *shader = sctx->shaders[i].current;
289          if (sctx->shaders[i].cso && shader) {
290             pipeline_code_hash = _mesa_hash_data_with_seed(
291                shader->binary.elf_buffer,
292                shader->binary.elf_size,
293                pipeline_code_hash);
294             base_address = MIN2(base_address,
295                                 shader->bo->gpu_address);
296          }
297       }
298 
299       struct ac_thread_trace_data *thread_trace_data = sctx->thread_trace;
300       if (!si_sqtt_pipeline_is_registered(thread_trace_data, pipeline_code_hash)) {
301          si_sqtt_register_pipeline(sctx, pipeline_code_hash, base_address, false);
302       }
303 
304       si_sqtt_describe_pipeline_bind(sctx, pipeline_code_hash, 0);
305    }
306 
307    if ((GFX_VERSION <= GFX8 &&
308         (si_pm4_state_enabled_and_changed(sctx, ls) || si_pm4_state_enabled_and_changed(sctx, es))) ||
309        si_pm4_state_enabled_and_changed(sctx, hs) || si_pm4_state_enabled_and_changed(sctx, gs) ||
310        si_pm4_state_enabled_and_changed(sctx, vs) || si_pm4_state_enabled_and_changed(sctx, ps)) {
311       unsigned scratch_size = 0;
312 
313       if (HAS_TESS) {
314          if (GFX_VERSION <= GFX8) /* LS */
315             scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
316 
317          scratch_size = MAX2(scratch_size, sctx->queued.named.hs->config.scratch_bytes_per_wave);
318 
319          if (HAS_GS) {
320             if (GFX_VERSION <= GFX8) /* ES */
321                scratch_size = MAX2(scratch_size, sctx->shader.tes.current->config.scratch_bytes_per_wave);
322 
323             scratch_size = MAX2(scratch_size, sctx->shader.gs.current->config.scratch_bytes_per_wave);
324          } else {
325             scratch_size = MAX2(scratch_size, sctx->shader.tes.current->config.scratch_bytes_per_wave);
326          }
327       } else if (HAS_GS) {
328          if (GFX_VERSION <= GFX8) /* ES */
329             scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
330 
331          scratch_size = MAX2(scratch_size, sctx->shader.gs.current->config.scratch_bytes_per_wave);
332       } else {
333          scratch_size = MAX2(scratch_size, sctx->shader.vs.current->config.scratch_bytes_per_wave);
334       }
335 
336       scratch_size = MAX2(scratch_size, sctx->shader.ps.current->config.scratch_bytes_per_wave);
337 
338       if (scratch_size && !si_update_spi_tmpring_size(sctx, scratch_size))
339          return false;
340 
341       if (GFX_VERSION >= GFX7) {
342          if (GFX_VERSION <= GFX8 && HAS_TESS && si_pm4_state_enabled_and_changed(sctx, ls))
343             sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
344 
345          if (HAS_TESS && si_pm4_state_enabled_and_changed(sctx, hs))
346             sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
347 
348          if (GFX_VERSION <= GFX8 && HAS_GS && si_pm4_state_enabled_and_changed(sctx, es))
349             sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
350 
351          if ((HAS_GS || NGG) && si_pm4_state_enabled_and_changed(sctx, gs))
352             sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
353 
354          if (!NGG && si_pm4_state_enabled_and_changed(sctx, vs))
355             sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
356 
357          if (si_pm4_state_enabled_and_changed(sctx, ps))
358             sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
359       }
360    }
361 
362    sctx->do_update_shaders = false;
363    return true;
364 }
365 
366 ALWAYS_INLINE
static unsigned si_conv_pipe_prim(unsigned mode)
368 {
369    static const unsigned prim_conv[] = {
370       [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
371       [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
372       [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
373       [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
374       [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
375       [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
376       [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
377       [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
378       [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
379       [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
380       [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
381       [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
382       [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
383       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
384       [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
385       [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST};
386    assert(mode < ARRAY_SIZE(prim_conv));
387    return prim_conv[mode];
388 }
389 
static void si_prefetch_shader_async(struct si_context *sctx, struct si_shader *shader)
391 {
392    struct pipe_resource *bo = &shader->bo->b.b;
393 
394    si_cp_dma_prefetch(sctx, bo, 0, bo->width0);
395 }
396 
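/* Controls how si_prefetch_shaders splits its work around a draw: BEFORE_DRAW
 * prefetches only the first shader stage so the draw isn't delayed, AFTER_DRAW
 * prefetches the remaining stages, and ALL does everything in one call. */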
397 enum si_L2_prefetch_mode {
398    PREFETCH_BEFORE_DRAW = 1,
399    PREFETCH_AFTER_DRAW,
400    PREFETCH_ALL,
401 };
402 
403 /**
404  * Prefetch shaders.
405  */
406 template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
407          si_L2_prefetch_mode mode>
static void si_prefetch_shaders(struct si_context *sctx)
409 {
410    unsigned mask = sctx->prefetch_L2_mask;
411 
412    /* GFX6 doesn't support the L2 prefetch. */
413    if (GFX_VERSION < GFX7 || !mask)
414       return;
415 
416    /* Prefetch shaders and VBO descriptors to TC L2. */
417    if (GFX_VERSION >= GFX9) {
418       /* Choose the right spot for the VBO prefetch. */
419       if (HAS_TESS) {
420          if (mode != PREFETCH_AFTER_DRAW) {
421             if (mask & SI_PREFETCH_HS)
422                si_prefetch_shader_async(sctx, sctx->queued.named.hs);
423 
424             if (mode == PREFETCH_BEFORE_DRAW)
425                return;
426          }
427 
428          if ((HAS_GS || NGG) && mask & SI_PREFETCH_GS)
429             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
430          if (!NGG && mask & SI_PREFETCH_VS)
431             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
432       } else if (HAS_GS || NGG) {
433          if (mode != PREFETCH_AFTER_DRAW) {
434             if (mask & SI_PREFETCH_GS)
435                si_prefetch_shader_async(sctx, sctx->queued.named.gs);
436 
437             if (mode == PREFETCH_BEFORE_DRAW)
438                return;
439          }
440 
441          if (!NGG && mask & SI_PREFETCH_VS)
442             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
443       } else {
444          if (mode != PREFETCH_AFTER_DRAW) {
445             if (mask & SI_PREFETCH_VS)
446                si_prefetch_shader_async(sctx, sctx->queued.named.vs);
447 
448             if (mode == PREFETCH_BEFORE_DRAW)
449                return;
450          }
451       }
452    } else {
453       /* GFX6-GFX8 */
454       /* Choose the right spot for the VBO prefetch. */
455       if (HAS_TESS) {
456          if (mode != PREFETCH_AFTER_DRAW) {
457             if (mask & SI_PREFETCH_LS)
458                si_prefetch_shader_async(sctx, sctx->queued.named.ls);
459 
460             if (mode == PREFETCH_BEFORE_DRAW)
461                return;
462          }
463 
464          if (mask & SI_PREFETCH_HS)
465             si_prefetch_shader_async(sctx, sctx->queued.named.hs);
466          if (mask & SI_PREFETCH_ES)
467             si_prefetch_shader_async(sctx, sctx->queued.named.es);
468          if (mask & SI_PREFETCH_GS)
469             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
470          if (mask & SI_PREFETCH_VS)
471             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
472       } else if (HAS_GS) {
473          if (mode != PREFETCH_AFTER_DRAW) {
474             if (mask & SI_PREFETCH_ES)
475                si_prefetch_shader_async(sctx, sctx->queued.named.es);
476 
477             if (mode == PREFETCH_BEFORE_DRAW)
478                return;
479          }
480 
481          if (mask & SI_PREFETCH_GS)
482             si_prefetch_shader_async(sctx, sctx->queued.named.gs);
483          if (mask & SI_PREFETCH_VS)
484             si_prefetch_shader_async(sctx, sctx->queued.named.vs);
485       } else {
486          if (mode != PREFETCH_AFTER_DRAW) {
487             if (mask & SI_PREFETCH_VS)
488                si_prefetch_shader_async(sctx, sctx->queued.named.vs);
489 
490             if (mode == PREFETCH_BEFORE_DRAW)
491                return;
492          }
493       }
494    }
495 
496    if (mask & SI_PREFETCH_PS)
497       si_prefetch_shader_async(sctx, sctx->queued.named.ps);
498 
499    /* This must be cleared only when AFTER_DRAW is true. */
500    sctx->prefetch_L2_mask = 0;
501 }
502 
503 /**
504  * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
505  * LS.LDS_SIZE is shared by all 3 shader stages.
506  *
507  * The information about LDS and other non-compile-time parameters is then
508  * written to userdata SGPRs.
509  */
static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_patches)
511 {
512    struct si_shader *ls_current;
513    struct si_shader_selector *ls;
514    /* The TES pointer will only be used for sctx->last_tcs.
515     * It would be wrong to think that TCS = TES. */
516    struct si_shader_selector *tcs =
517       sctx->shader.tcs.cso ? sctx->shader.tcs.cso : sctx->shader.tes.cso;
518    unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
519    bool has_primid_instancing_bug = sctx->chip_class == GFX6 && sctx->screen->info.max_se == 1;
520    unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
521    uint8_t num_tcs_input_cp = sctx->patch_vertices;
522 
523    /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
524    if (sctx->chip_class >= GFX9) {
525       if (sctx->shader.tcs.cso)
526          ls_current = sctx->shader.tcs.current;
527       else
528          ls_current = sctx->fixed_func_tcs_shader.current;
529 
530       ls = ls_current->key.part.tcs.ls;
531    } else {
532       ls_current = sctx->shader.vs.current;
533       ls = sctx->shader.vs.cso;
534    }
535 
536    if (sctx->last_ls == ls_current && sctx->last_tcs == tcs &&
537        sctx->last_tes_sh_base == tes_sh_base && sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
538        (!has_primid_instancing_bug || (sctx->last_tess_uses_primid == tess_uses_primid))) {
539       *num_patches = sctx->last_num_patches;
540       return;
541    }
542 
543    sctx->last_ls = ls_current;
544    sctx->last_tcs = tcs;
545    sctx->last_tes_sh_base = tes_sh_base;
546    sctx->last_num_tcs_input_cp = num_tcs_input_cp;
547    sctx->last_tess_uses_primid = tess_uses_primid;
548 
549    /* This calculates how shader inputs and outputs among VS, TCS, and TES
550     * are laid out in LDS. */
551    unsigned num_tcs_inputs = util_last_bit64(ls->outputs_written);
552    unsigned num_tcs_output_cp, num_tcs_outputs, num_tcs_patch_outputs;
553 
554    if (sctx->shader.tcs.cso) {
555       num_tcs_outputs = util_last_bit64(tcs->outputs_written);
556       num_tcs_output_cp = tcs->info.base.tess.tcs_vertices_out;
557       num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
558    } else {
559       /* No TCS. Route varyings from LS to TES. */
560       num_tcs_outputs = num_tcs_inputs;
561       num_tcs_output_cp = num_tcs_input_cp;
562       num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
563    }
564 
565    unsigned input_vertex_size = ls->lshs_vertex_stride;
566    unsigned output_vertex_size = num_tcs_outputs * 16;
567    unsigned input_patch_size;
568 
569    /* Allocate LDS for TCS inputs only if it's used. */
570    if (!ls_current->key.opt.same_patch_vertices ||
571        tcs->info.base.inputs_read & ~tcs->tcs_vgpr_only_inputs)
572       input_patch_size = num_tcs_input_cp * input_vertex_size;
573    else
574       input_patch_size = 0;
575 
576    unsigned pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
577    unsigned output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
578    unsigned lds_per_patch;
579 
580    /* Compute the LDS size per patch.
581     *
582     * LDS is used to store TCS outputs if they are read, and to store tess
583     * factors if they are not defined in all invocations.
584     */
585    if (tcs->info.base.outputs_read ||
586        tcs->info.base.patch_outputs_read ||
587        !tcs->info.tessfactors_are_def_in_all_invocs) {
588       lds_per_patch = input_patch_size + output_patch_size;
589    } else {
590       /* LDS will only store TCS inputs. The offchip buffer will only store TCS outputs. */
591       lds_per_patch = MAX2(input_patch_size, output_patch_size);
592    }
593 
   /* Ensure that we only need 4 waves per CU, so that we don't need to check
    * resource usage (such as whether we have enough VGPRs to fit the whole
    * threadgroup into the CU). It also ensures that the number of TCS input
    * and output vertices per threadgroup is at most 256, which is the hw limit.
    */
599    unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
600    *num_patches = 256 / max_verts_per_patch;
601 
602    /* Not necessary for correctness, but higher numbers are slower.
603     * The hardware can do more, but the radeonsi shader constant is
604     * limited to 6 bits.
605     */
606    *num_patches = MIN2(*num_patches, 64); /* e.g. 64 triangles in exactly 3 waves */
607 
608    /* When distributed tessellation is unsupported, switch between SEs
609     * at a higher frequency to manually balance the workload between SEs.
610     */
611    if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
612       *num_patches = MIN2(*num_patches, 16); /* recommended */
613 
614    /* Make sure the output data fits in the offchip buffer */
615    *num_patches =
616       MIN2(*num_patches, (sctx->screen->tess_offchip_block_dw_size * 4) / output_patch_size);
617 
618    /* Make sure that the data fits in LDS. This assumes the shaders only
619     * use LDS for the inputs and outputs.
620     *
621     * The maximum allowed LDS size is 32K. Higher numbers can hang.
622     * Use 16K as the maximum, so that we can fit 2 workgroups on the same CU.
623     */
624    ASSERTED unsigned max_lds_size = 32 * 1024; /* hw limit */
625    unsigned target_lds_size = 16 * 1024; /* target at least 2 workgroups per CU, 16K each */
626    *num_patches = MIN2(*num_patches, target_lds_size / lds_per_patch);
627    *num_patches = MAX2(*num_patches, 1);
628    assert(*num_patches * lds_per_patch <= max_lds_size);
629 
630    /* Make sure that vector lanes are fully occupied by cutting off the last wave
631     * if it's only partially filled.
632     */
633    unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
634    unsigned wave_size = sctx->screen->ge_wave_size;
635 
636    if (temp_verts_per_tg > wave_size &&
637        (wave_size - temp_verts_per_tg % wave_size >= MAX2(max_verts_per_patch, 8)))
638       *num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
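   /* Example: wave_size=64 and 4 verts/patch with 40 patches gives 160 vertices;
    * the last wave would only use 32 lanes, so this rounds down to 32 patches
    * (128 vertices = 2 full waves). */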
639 
640    if (sctx->chip_class == GFX6) {
641       /* GFX6 bug workaround, related to power management. Limit LS-HS
642        * threadgroups to only one wave.
643        */
644       unsigned one_wave = wave_size / max_verts_per_patch;
645       *num_patches = MIN2(*num_patches, one_wave);
646    }
647 
648    /* The VGT HS block increments the patch ID unconditionally
649     * within a single threadgroup. This results in incorrect
650     * patch IDs when instanced draws are used.
651     *
652     * The intended solution is to restrict threadgroups to
653     * a single instance by setting SWITCH_ON_EOI, which
654     * should cause IA to split instances up. However, this
655     * doesn't work correctly on GFX6 when there is no other
656     * SE to switch to.
657     */
658    if (has_primid_instancing_bug && tess_uses_primid)
659       *num_patches = 1;
660 
661    sctx->last_num_patches = *num_patches;
662 
663    unsigned output_patch0_offset = input_patch_size * *num_patches;
664    unsigned perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
665 
666    /* Compute userdata SGPRs. */
667    assert(((input_vertex_size / 4) & ~0xff) == 0);
668    assert(((output_vertex_size / 4) & ~0xff) == 0);
669    assert(((input_patch_size / 4) & ~0x1fff) == 0);
670    assert(((output_patch_size / 4) & ~0x1fff) == 0);
671    assert(((output_patch0_offset / 16) & ~0xffff) == 0);
672    assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
673    assert(num_tcs_input_cp <= 32);
674    assert(num_tcs_output_cp <= 32);
675    assert(*num_patches <= 64);
676    assert(((pervertex_output_patch_size * *num_patches) & ~0x1fffff) == 0);
677 
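   /* Secure (TMZ) command streams have to use the TMZ copy of the tess rings so
    * that protected-content draws only touch protected memory. */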
678    uint64_t ring_va = (unlikely(sctx->ws->cs_is_secure(&sctx->gfx_cs)) ?
679       si_resource(sctx->tess_rings_tmz) : si_resource(sctx->tess_rings))->gpu_address;
680    assert((ring_va & u_bit_consecutive(0, 19)) == 0);
681 
682    unsigned tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
683                             S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
684    unsigned tcs_out_layout = (output_patch_size / 4) | (num_tcs_input_cp << 13) | ring_va;
685    unsigned tcs_out_offsets = (output_patch0_offset / 16) | ((perpatch_output_offset / 16) << 16);
686    unsigned offchip_layout =
687       (*num_patches - 1) | ((num_tcs_output_cp - 1) << 6) |
688       ((pervertex_output_patch_size * *num_patches) << 11);
689 
690    /* Compute the LDS size. */
691    unsigned lds_size = lds_per_patch * *num_patches;
692 
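   /* The LDS_SIZE register fields are in units of 128 dwords (512 bytes) on GFX7+
    * and 64 dwords (256 bytes) on GFX6, hence the different granularity below. */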
693    if (sctx->chip_class >= GFX7) {
694       assert(lds_size <= 65536);
695       lds_size = align(lds_size, 512) / 512;
696    } else {
697       assert(lds_size <= 32768);
698       lds_size = align(lds_size, 256) / 256;
699    }
700 
701    /* Set SI_SGPR_VS_STATE_BITS. */
702    sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE & C_VS_STATE_LS_OUT_VERTEX_SIZE;
703    sctx->current_vs_state |= tcs_in_layout;
704 
705    /* We should be able to support in-shader LDS use with LLVM >= 9
706     * by just adding the lds_sizes together, but it has never
707     * been tested. */
708    assert(ls_current->config.lds_size == 0);
709 
710    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
711    radeon_begin(cs);
712 
713    if (sctx->chip_class >= GFX9) {
714       unsigned hs_rsrc2 = ls_current->config.rsrc2;
715 
716       if (sctx->chip_class >= GFX10)
717          hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
718       else
719          hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
720 
721       radeon_set_sh_reg(R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
722 
723       /* Set userdata SGPRs for merged LS-HS. */
724       radeon_set_sh_reg_seq(
725          R_00B430_SPI_SHADER_USER_DATA_LS_0 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
726       radeon_emit(offchip_layout);
727       radeon_emit(tcs_out_offsets);
728       radeon_emit(tcs_out_layout);
729    } else {
730       unsigned ls_rsrc2 = ls_current->config.rsrc2;
731 
732       si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
733       ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
734 
735       /* Due to a hw bug, RSRC2_LS must be written twice with another
736        * LS register written in between. */
737       if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
738          radeon_set_sh_reg(R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
739       radeon_set_sh_reg_seq(R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
740       radeon_emit(ls_current->config.rsrc1);
741       radeon_emit(ls_rsrc2);
742 
743       /* Set userdata SGPRs for TCS. */
744       radeon_set_sh_reg_seq(
745          R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
746       radeon_emit(offchip_layout);
747       radeon_emit(tcs_out_offsets);
748       radeon_emit(tcs_out_layout);
749       radeon_emit(tcs_in_layout);
750    }
751 
752    /* Set userdata SGPRs for TES. */
753    radeon_set_sh_reg_seq(tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
754    radeon_emit(offchip_layout);
755    radeon_emit(ring_va);
756    radeon_end();
757 
758    unsigned ls_hs_config =
759          S_028B58_NUM_PATCHES(*num_patches) |
760          S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
761          S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
762 
763    if (sctx->last_ls_hs_config != ls_hs_config) {
764       radeon_begin(cs);
765       if (sctx->chip_class >= GFX7) {
766          radeon_set_context_reg_idx(R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
767       } else {
768          radeon_set_context_reg(R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
769       }
770       radeon_end_update_context_roll(sctx);
771       sctx->last_ls_hs_config = ls_hs_config;
772    }
773 }
774 
static unsigned si_num_prims_for_vertices(enum pipe_prim_type prim,
                                          unsigned count, unsigned vertices_per_patch)
777 {
778    switch (prim) {
779    case PIPE_PRIM_PATCHES:
780       return count / vertices_per_patch;
781    case PIPE_PRIM_POLYGON:
782       /* It's a triangle fan with different edge flags. */
783       return count >= 3 ? count - 2 : 0;
784    case SI_PRIM_RECTANGLE_LIST:
785       return count / 3;
786    default:
787       return u_decomposed_prims_for_vertices(prim, count);
788    }
789 }
790 
static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_vgt_param_key *key)
792 {
793    STATIC_ASSERT(sizeof(union si_vgt_param_key) == 2);
794    unsigned max_primgroup_in_wave = 2;
795 
796    /* SWITCH_ON_EOP(0) is always preferable. */
797    bool wd_switch_on_eop = false;
798    bool ia_switch_on_eop = false;
799    bool ia_switch_on_eoi = false;
800    bool partial_vs_wave = false;
801    bool partial_es_wave = false;
802 
803    if (key->u.uses_tess) {
804       /* SWITCH_ON_EOI must be set if PrimID is used. */
805       if (key->u.tess_uses_prim_id)
806          ia_switch_on_eoi = true;
807 
808       /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
809       if ((sscreen->info.family == CHIP_TAHITI || sscreen->info.family == CHIP_PITCAIRN ||
810            sscreen->info.family == CHIP_BONAIRE) &&
811           key->u.uses_gs)
812          partial_vs_wave = true;
813 
814       /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
815       if (sscreen->info.has_distributed_tess) {
816          if (key->u.uses_gs) {
817             if (sscreen->info.chip_class == GFX8)
818                partial_es_wave = true;
819          } else {
820             partial_vs_wave = true;
821          }
822       }
823    }
824 
825    /* This is a hardware requirement. */
826    if (key->u.line_stipple_enabled || (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
827       ia_switch_on_eop = true;
828       wd_switch_on_eop = true;
829    }
830 
831    if (sscreen->info.chip_class >= GFX7) {
832       /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
833        * 4 shader engines. Set 1 to pass the assertion below.
834        * The other cases are hardware requirements.
835        *
836        * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
837        * for points, line strips, and tri strips.
838        */
839       if (sscreen->info.max_se <= 2 || key->u.prim == PIPE_PRIM_POLYGON ||
840           key->u.prim == PIPE_PRIM_LINE_LOOP || key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
841           key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
842           (key->u.primitive_restart &&
843            (sscreen->info.family < CHIP_POLARIS10 ||
844             (key->u.prim != PIPE_PRIM_POINTS && key->u.prim != PIPE_PRIM_LINE_STRIP &&
845              key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
846           key->u.count_from_stream_output)
847          wd_switch_on_eop = true;
848 
849       /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
850        * We don't know that for indirect drawing, so treat it as
851        * always problematic. */
852       if (sscreen->info.family == CHIP_HAWAII && key->u.uses_instancing)
853          wd_switch_on_eop = true;
854 
855       /* Performance recommendation for 4 SE Gfx7-8 parts if
856        * instances are smaller than a primgroup.
857        * Assume indirect draws always use small instances.
858        * This is needed for good VS wave utilization.
859        */
860       if (sscreen->info.chip_class <= GFX8 && sscreen->info.max_se == 4 &&
861           key->u.multi_instances_smaller_than_primgroup)
862          wd_switch_on_eop = true;
863 
864       /* Required on GFX7 and later. */
865       if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
866          ia_switch_on_eoi = true;
867 
868       /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
869        * to work around a GS hang.
870        */
871       if (key->u.uses_gs &&
872           (sscreen->info.family == CHIP_TONGA || sscreen->info.family == CHIP_FIJI ||
873            sscreen->info.family == CHIP_POLARIS10 || sscreen->info.family == CHIP_POLARIS11 ||
874            sscreen->info.family == CHIP_POLARIS12 || sscreen->info.family == CHIP_VEGAM))
875          partial_vs_wave = true;
876 
877       /* Required by Hawaii and, for some special cases, by GFX8. */
878       if (ia_switch_on_eoi &&
879           (sscreen->info.family == CHIP_HAWAII ||
880            (sscreen->info.chip_class == GFX8 && (key->u.uses_gs || max_primgroup_in_wave != 2))))
881          partial_vs_wave = true;
882 
883       /* Instancing bug on Bonaire. */
884       if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi && key->u.uses_instancing)
885          partial_vs_wave = true;
886 
887       /* This only applies to Polaris10 and later 4 SE chips.
888        * wd_switch_on_eop is already true on all other chips.
889        */
890       if (!wd_switch_on_eop && key->u.primitive_restart)
891          partial_vs_wave = true;
892 
893       /* If the WD switch is false, the IA switch must be false too. */
894       assert(wd_switch_on_eop || !ia_switch_on_eop);
895    }
896 
897    /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
898    if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
899       partial_es_wave = true;
900 
901    return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
902           S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
903           S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
904           S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
905           /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
906           S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ? max_primgroup_in_wave
907                                                                         : 0) |
908           S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
909           S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
910 }
911 
static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
913 {
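   /* Precompute IA_MULTI_VGT_PARAM for every si_vgt_param_key combination, so that
    * the draw path only has to do a table lookup and OR in PRIMGROUP_SIZE. */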
914    for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
915       for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
916          for (int multi_instances = 0; multi_instances < 2; multi_instances++)
917             for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
918                for (int count_from_so = 0; count_from_so < 2; count_from_so++)
919                   for (int line_stipple = 0; line_stipple < 2; line_stipple++)
920                      for (int uses_tess = 0; uses_tess < 2; uses_tess++)
921                         for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
922                            for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
923                               union si_vgt_param_key key;
924 
925                               key.index = 0;
926                               key.u.prim = prim;
927                               key.u.uses_instancing = uses_instancing;
928                               key.u.multi_instances_smaller_than_primgroup = multi_instances;
929                               key.u.primitive_restart = primitive_restart;
930                               key.u.count_from_stream_output = count_from_so;
931                               key.u.line_stipple_enabled = line_stipple;
932                               key.u.uses_tess = uses_tess;
933                               key.u.tess_uses_prim_id = tess_uses_primid;
934                               key.u.uses_gs = uses_gs;
935 
936                               sctx->ia_multi_vgt_param[key.index] =
937                                  si_get_init_multi_vgt_param(sctx->screen, &key);
938                            }
939 }
940 
static bool si_is_line_stipple_enabled(struct si_context *sctx)
942 {
943    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
944 
945    return rs->line_stipple_enable && sctx->current_rast_prim != PIPE_PRIM_POINTS &&
946           (rs->polygon_mode_is_lines || util_prim_is_lines(sctx->current_rast_prim));
947 }
948 
949 enum si_is_draw_vertex_state {
950    DRAW_VERTEX_STATE_OFF,
951    DRAW_VERTEX_STATE_ON,
952 };
953 
954 template <si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static bool num_instanced_prims_less_than(const struct pipe_draw_indirect_info *indirect,
                                          enum pipe_prim_type prim,
                                          unsigned min_vertex_count,
                                          unsigned instance_count,
                                          unsigned num_prims,
                                          ubyte vertices_per_patch)
961 {
962    if (IS_DRAW_VERTEX_STATE)
963       return 0;
964 
965    if (indirect) {
966       return indirect->buffer ||
967              (instance_count > 1 && indirect->count_from_stream_output);
968    } else {
969       return instance_count > 1 &&
970              si_num_prims_for_vertices(prim, min_vertex_count, vertices_per_patch) < num_prims;
971    }
972 }
973 
974 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
975           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
                                          const struct pipe_draw_indirect_info *indirect,
                                          enum pipe_prim_type prim, unsigned num_patches,
                                          unsigned instance_count, bool primitive_restart,
                                          unsigned min_vertex_count)
981 {
982    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
983    unsigned primgroup_size;
984    unsigned ia_multi_vgt_param;
985 
986    if (HAS_TESS) {
987       primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
988    } else if (HAS_GS) {
989       primgroup_size = 64; /* recommended with a GS */
990    } else {
991       primgroup_size = 128; /* recommended without a GS and tess */
992    }
993 
994    key.u.prim = prim;
995    key.u.uses_instancing = !IS_DRAW_VERTEX_STATE &&
996                            ((indirect && indirect->buffer) || instance_count > 1);
997    key.u.multi_instances_smaller_than_primgroup =
998       num_instanced_prims_less_than<IS_DRAW_VERTEX_STATE>(indirect, prim, min_vertex_count,
999                                                           instance_count, primgroup_size,
1000                                                           sctx->patch_vertices);
1001    key.u.primitive_restart = !IS_DRAW_VERTEX_STATE && primitive_restart;
1002    key.u.count_from_stream_output = !IS_DRAW_VERTEX_STATE && indirect &&
1003                                     indirect->count_from_stream_output;
1004    key.u.line_stipple_enabled = si_is_line_stipple_enabled(sctx);
1005 
1006    ia_multi_vgt_param =
1007       sctx->ia_multi_vgt_param[key.index] | S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
1008 
1009    if (HAS_GS) {
1010       /* GS requirement. */
1011       if (GFX_VERSION <= GFX8 &&
1012           SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
1013          ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
1014 
1015       /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
1016        * The hw doc says all multi-SE chips are affected, but Vulkan
1017        * only applies it to Hawaii. Do what Vulkan does.
1018        */
1019       if (GFX_VERSION == GFX7 &&
1020           sctx->family == CHIP_HAWAII && G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
1021           num_instanced_prims_less_than<IS_DRAW_VERTEX_STATE>(indirect, prim, min_vertex_count,
1022                                                               instance_count, 2, sctx->patch_vertices))
1023          sctx->flags |= SI_CONTEXT_VGT_FLUSH;
1024    }
1025 
1026    return ia_multi_vgt_param;
1027 }
1028 
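/* VGT_GS_OUT_PRIM_TYPE only distinguishes point/line/triangle/rectangle outputs,
 * which is why most primitive types collapse to LINESTRIP or TRISTRIP here. */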
1029 ALWAYS_INLINE
static unsigned si_conv_prim_to_gs_out(unsigned mode)
1031 {
1032    static const int prim_conv[] = {
1033       [PIPE_PRIM_POINTS] = V_028A6C_POINTLIST,
1034       [PIPE_PRIM_LINES] = V_028A6C_LINESTRIP,
1035       [PIPE_PRIM_LINE_LOOP] = V_028A6C_LINESTRIP,
1036       [PIPE_PRIM_LINE_STRIP] = V_028A6C_LINESTRIP,
1037       [PIPE_PRIM_TRIANGLES] = V_028A6C_TRISTRIP,
1038       [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_TRISTRIP,
1039       [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_TRISTRIP,
1040       [PIPE_PRIM_QUADS] = V_028A6C_TRISTRIP,
1041       [PIPE_PRIM_QUAD_STRIP] = V_028A6C_TRISTRIP,
1042       [PIPE_PRIM_POLYGON] = V_028A6C_TRISTRIP,
1043       [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_LINESTRIP,
1044       [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_LINESTRIP,
1045       [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_TRISTRIP,
1046       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_TRISTRIP,
1047       [PIPE_PRIM_PATCHES] = V_028A6C_POINTLIST,
1048       [SI_PRIM_RECTANGLE_LIST] = V_028A6C_RECTLIST,
1049    };
1050    assert(mode < ARRAY_SIZE(prim_conv));
1051 
1052    return prim_conv[mode];
1053 }
1054 
1055 /* rast_prim is the primitive type after GS. */
1056 template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
1058 {
1059    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1060    enum pipe_prim_type rast_prim = sctx->current_rast_prim;
1061    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1062 
1063    radeon_begin(cs);
1064 
1065    if (unlikely(si_is_line_stipple_enabled(sctx))) {
1066       /* For lines, reset the stipple pattern at each primitive. Otherwise,
1067        * reset the stipple pattern at each packet (line strips, line loops).
1068        */
1069       bool reset_per_prim = rast_prim == PIPE_PRIM_LINES ||
1070                             rast_prim == PIPE_PRIM_LINES_ADJACENCY;
1071       /* 0 = no reset, 1 = reset per prim, 2 = reset per packet */
1072       unsigned value =
1073          rs->pa_sc_line_stipple | S_028A0C_AUTO_RESET_CNTL(reset_per_prim ? 1 : 2);
1074 
1075       radeon_opt_set_context_reg(sctx, R_028A0C_PA_SC_LINE_STIPPLE, SI_TRACKED_PA_SC_LINE_STIPPLE,
1076                                  value);
1077    }
1078 
1079    unsigned gs_out_prim = si_conv_prim_to_gs_out(rast_prim);
1080    if (unlikely(gs_out_prim != sctx->last_gs_out_prim && (NGG || HAS_GS))) {
1081       radeon_set_context_reg(R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
1082       sctx->last_gs_out_prim = gs_out_prim;
1083    }
1084 
1085    if (GFX_VERSION == GFX9)
1086       radeon_end_update_context_roll(sctx);
1087    else
1088       radeon_end();
1089 
1090    if (NGG) {
1091       struct si_shader *hw_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current;
1092 
1093       if (hw_vs->uses_vs_state_provoking_vertex) {
1094          unsigned vtx_index = rs->flatshade_first ? 0 : gs_out_prim;
1095 
1096          sctx->current_vs_state &= C_VS_STATE_PROVOKING_VTX_INDEX;
1097          sctx->current_vs_state |= S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
1098       }
1099 
1100       if (hw_vs->uses_vs_state_outprim) {
1101          sctx->current_vs_state &= C_VS_STATE_OUTPRIM;
1102          sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out_prim);
1103       }
1104    }
1105 }
1106 
1107 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1108           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_vs_state(struct si_context *sctx, unsigned index_size)
1110 {
1111    if (!IS_DRAW_VERTEX_STATE && sctx->num_vs_blit_sgprs) {
1112       /* Re-emit the state after we leave u_blitter. */
1113       sctx->last_vs_state = ~0;
1114       return;
1115    }
1116 
1117    if (sctx->shader.vs.cso->info.uses_base_vertex) {
1118       sctx->current_vs_state &= C_VS_STATE_INDEXED;
1119       sctx->current_vs_state |= S_VS_STATE_INDEXED(!!index_size);
1120    }
1121 
1122    if (sctx->current_vs_state != sctx->last_vs_state) {
1123       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1124 
1125       /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
1126       unsigned vs_base = si_get_user_data_base(GFX_VERSION, HAS_TESS, HAS_GS, NGG,
1127                                                PIPE_SHADER_VERTEX);
1128       radeon_begin(cs);
1129       radeon_set_sh_reg(vs_base + SI_SGPR_VS_STATE_BITS * 4,
1130                         sctx->current_vs_state);
1131 
1132       /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
1133        * before the rasterizer.
1134        *
1135        * For TES or the GS copy shader without NGG:
1136        */
1137       if (vs_base != R_00B130_SPI_SHADER_USER_DATA_VS_0) {
1138          radeon_set_sh_reg(R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_VS_STATE_BITS * 4,
1139                            sctx->current_vs_state);
1140       }
1141 
1142       /* For NGG: */
1143       if (GFX_VERSION >= GFX10 && vs_base != R_00B230_SPI_SHADER_USER_DATA_GS_0) {
1144          radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + SI_SGPR_VS_STATE_BITS * 4,
1145                            sctx->current_vs_state);
1146       }
1147       radeon_end();
1148 
1149       sctx->last_vs_state = sctx->current_vs_state;
1150    }
1151 }
1152 
1153 ALWAYS_INLINE
static bool si_prim_restart_index_changed(struct si_context *sctx, bool primitive_restart,
                                          unsigned restart_index)
1156 {
1157    return primitive_restart && (restart_index != sctx->last_restart_index ||
1158                                 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
1159 }
1160 
1161 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
1162           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
                                       const struct pipe_draw_indirect_info *indirect,
                                       enum pipe_prim_type prim, unsigned num_patches,
                                       unsigned instance_count, bool primitive_restart,
                                       unsigned min_vertex_count)
1168 {
1169    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1170    unsigned ia_multi_vgt_param;
1171 
1172    ia_multi_vgt_param =
1173       si_get_ia_multi_vgt_param<GFX_VERSION, HAS_TESS, HAS_GS, IS_DRAW_VERTEX_STATE>
1174          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
1175           min_vertex_count);
1176 
1177    /* Draw state. */
1178    if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
1179       radeon_begin(cs);
1180 
1181       if (GFX_VERSION == GFX9)
1182          radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION,
1183                                     R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
1184       else if (GFX_VERSION >= GFX7)
1185          radeon_set_context_reg_idx(R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
1186       else
1187          radeon_set_context_reg(R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
1188 
1189       radeon_end();
1190 
1191       sctx->last_multi_vgt_param = ia_multi_vgt_param;
1192    }
1193 }
1194 
1195 /* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
1196  * We overload last_multi_vgt_param.
1197  */
1198 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
1200 {
1201    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
1202    unsigned ge_cntl;
1203 
1204    if (NGG) {
1205       if (HAS_TESS) {
1206          ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
1207                    S_03096C_VERT_GRP_SIZE(0) |
1208                    S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
1209       } else {
1210          ge_cntl = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->ge_cntl;
1211       }
1212    } else {
1213       unsigned primgroup_size;
1214       unsigned vertgroup_size;
1215 
1216       if (HAS_TESS) {
1217          primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
1218          vertgroup_size = 0;
1219       } else if (HAS_GS) {
1220          unsigned vgt_gs_onchip_cntl = sctx->shader.gs.current->ctx_reg.gs.vgt_gs_onchip_cntl;
1221          primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
1222          vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
1223       } else {
1224          primgroup_size = 128; /* recommended without a GS and tess */
1225          vertgroup_size = 0;
1226       }
1227 
1228       ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) | S_03096C_VERT_GRP_SIZE(vertgroup_size) |
1229                 S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
1230    }
1231 
1232    ge_cntl |= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx));
1233 
1234    if (ge_cntl != sctx->last_multi_vgt_param) {
1235       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1236 
1237       radeon_begin(cs);
1238       radeon_set_uconfig_reg(R_03096C_GE_CNTL, ge_cntl);
1239       radeon_end();
1240       sctx->last_multi_vgt_param = ge_cntl;
1241    }
1242 }
1243 
1244 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1245           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_draw_registers(struct si_context *sctx,
                                   const struct pipe_draw_indirect_info *indirect,
                                   enum pipe_prim_type prim, unsigned num_patches,
                                   unsigned instance_count, bool primitive_restart,
                                   unsigned restart_index, unsigned min_vertex_count)
1251 {
1252    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1253 
1254    if (IS_DRAW_VERTEX_STATE)
1255       primitive_restart = false;
1256 
1257    if (GFX_VERSION >= GFX10)
1258       gfx10_emit_ge_cntl<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx, num_patches);
1259    else
1260       si_emit_ia_multi_vgt_param<GFX_VERSION, HAS_TESS, HAS_GS, IS_DRAW_VERTEX_STATE>
1261          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
1262           min_vertex_count);
1263 
1264    radeon_begin(cs);
1265 
1266    if (prim != sctx->last_prim) {
1267       unsigned vgt_prim = si_conv_pipe_prim(prim);
1268 
1269       if (GFX_VERSION >= GFX10)
1270          radeon_set_uconfig_reg(R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
1271       else if (GFX_VERSION >= GFX7)
1272          radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION, R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
1273       else
1274          radeon_set_config_reg(R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
1275 
1276       sctx->last_prim = prim;
1277    }
1278 
1279    /* Primitive restart. */
1280    if (primitive_restart != sctx->last_primitive_restart_en) {
1281       if (GFX_VERSION >= GFX9)
1282          radeon_set_uconfig_reg(R_03092C_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
1283       else
1284          radeon_set_context_reg(R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
1285 
1286       sctx->last_primitive_restart_en = primitive_restart;
1287    }
1288    if (si_prim_restart_index_changed(sctx, primitive_restart, restart_index)) {
1289       radeon_set_context_reg(R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, restart_index);
1290       sctx->last_restart_index = restart_index;
1291       if (GFX_VERSION == GFX9)
1292          sctx->context_roll = true;
1293    }
1294    radeon_end();
1295 }
1296 
1297 #define EMIT_SQTT_END_DRAW do {                                          \
1298       if (GFX_VERSION >= GFX9 && unlikely(sctx->thread_trace_enabled)) { \
1299          radeon_begin(&sctx->gfx_cs);                                    \
1300          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));       \
1301          radeon_emit(EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) |          \
1302                      EVENT_INDEX(0));                                    \
1303          radeon_end();                                      \
1304       }                                                                  \
1305    } while (0)
1306 
1307 template <chip_class GFX_VERSION, si_has_ngg NGG, si_is_draw_vertex_state IS_DRAW_VERTEX_STATE>
1308 ALWAYS_INLINE
1309 static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw_info *info,
1310                                  unsigned drawid_base,
1311                                  const struct pipe_draw_indirect_info *indirect,
1312                                  const struct pipe_draw_start_count_bias *draws,
1313                                  unsigned num_draws, unsigned total_count,
1314                                  struct pipe_resource *indexbuf, unsigned index_size,
1315                                  unsigned index_offset, unsigned instance_count,
1316                                  unsigned original_index_size)
1317 {
1318    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1319 
1320    if (unlikely(sctx->thread_trace_enabled)) {
1321       si_sqtt_write_event_marker(sctx, &sctx->gfx_cs, sctx->sqtt_next_event,
1322                                  UINT_MAX, UINT_MAX, UINT_MAX);
1323    }
1324 
1325    uint32_t use_opaque = 0;
1326 
1327    if (!IS_DRAW_VERTEX_STATE && indirect && indirect->count_from_stream_output) {
1328       struct si_streamout_target *t = (struct si_streamout_target *)indirect->count_from_stream_output;
1329 
1330       radeon_begin(cs);
1331       radeon_set_context_reg(R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
1332       radeon_end();
1333 
1334       si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_REG, NULL,
1335                       R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2, COPY_DATA_SRC_MEM,
1336                       t->buf_filled_size, t->buf_filled_size_offset);
1337       use_opaque = S_0287F0_USE_OPAQUE(1);
1338       indirect = NULL;
1339    }
1340 
1341    uint32_t index_max_size = 0;
1342    uint64_t index_va = 0;
1343 
1344    radeon_begin(cs);
1345 
1346    /* draw packet */
1347    if (index_size) {
1348       /* Register shadowing doesn't shadow INDEX_TYPE. */
1349       if (index_size != sctx->last_index_size || sctx->shadowed_regs) {
1350          unsigned index_type;
1351 
1352          /* Index type computation. When we look at how we need to translate index_size,
1353           * we can see that we just need 2 shifts to get the hw value.
1354           *
1355           * 1 = 001b --> 10b = 2
1356           * 2 = 010b --> 00b = 0
1357           * 4 = 100b --> 01b = 1
1358           */
1359          index_type = ((index_size >> 2) | (index_size << 1)) & 0x3;
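         /* Sanity check of the shift trick for one value: index_size == 4 gives
          * (4 >> 2) | (4 << 1) = 1 | 8 = 9, and 9 & 0x3 = 1, matching the table above. */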
1360 
1361          if (GFX_VERSION <= GFX7 && SI_BIG_ENDIAN) {
1362             /* GFX7 doesn't support ubyte indices. */
1363             index_type |= index_size == 2 ? V_028A7C_VGT_DMA_SWAP_16_BIT
1364                                           : V_028A7C_VGT_DMA_SWAP_32_BIT;
1365          }
1366 
1367          if (GFX_VERSION >= GFX9) {
1368             radeon_set_uconfig_reg_idx(sctx->screen, GFX_VERSION,
1369                                        R_03090C_VGT_INDEX_TYPE, 2, index_type);
1370          } else {
1371             radeon_emit(PKT3(PKT3_INDEX_TYPE, 0, 0));
1372             radeon_emit(index_type);
1373          }
1374 
1375          sctx->last_index_size = index_size;
1376       }
1377 
1378       index_max_size = (indexbuf->width0 - index_offset) >> util_logbase2(index_size);
1379       /* Skip draw calls with 0-sized index buffers.
1380        * They cause a hang on some chips, like Navi10-14.
1381        */
1382       if (!index_max_size) {
1383          radeon_end();
1384          return;
1385       }
1386 
1387       index_va = si_resource(indexbuf)->gpu_address + index_offset;
1388 
1389       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(indexbuf), RADEON_USAGE_READ,
1390                                 RADEON_PRIO_INDEX_BUFFER);
1391    } else {
1392       /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
1393        * so the state must be re-emitted before the next indexed draw.
1394        */
1395       if (GFX_VERSION >= GFX7)
1396          sctx->last_index_size = -1;
1397    }
1398 
1399    unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
1400    bool render_cond_bit = sctx->render_cond_enabled;
1401 
1402    if (!IS_DRAW_VERTEX_STATE && indirect) {
1403       assert(num_draws == 1);
1404       uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
1405 
1406       assert(indirect_va % 8 == 0);
1407 
1408       si_invalidate_draw_constants(sctx);
1409 
1410       radeon_emit(PKT3(PKT3_SET_BASE, 2, 0));
1411       radeon_emit(1);
1412       radeon_emit(indirect_va);
1413       radeon_emit(indirect_va >> 32);
1414 
1415       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(indirect->buffer),
1416                                 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
1417 
1418       unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
1419 
1420       assert(indirect->offset % 4 == 0);
1421 
1422       if (index_size) {
1423          radeon_emit(PKT3(PKT3_INDEX_BASE, 1, 0));
1424          radeon_emit(index_va);
1425          radeon_emit(index_va >> 32);
1426 
1427          radeon_emit(PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
1428          radeon_emit(index_max_size);
1429       }
1430 
1431       if (!sctx->screen->has_draw_indirect_multi) {
1432          radeon_emit(PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT : PKT3_DRAW_INDIRECT, 3,
1433                           render_cond_bit));
1434          radeon_emit(indirect->offset);
1435          radeon_emit((sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
1436          radeon_emit((sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
1437          radeon_emit(di_src_sel);
1438       } else {
1439          uint64_t count_va = 0;
1440 
1441          if (indirect->indirect_draw_count) {
1442             struct si_resource *params_buf = si_resource(indirect->indirect_draw_count);
1443 
1444             radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, params_buf, RADEON_USAGE_READ,
1445                                       RADEON_PRIO_DRAW_INDIRECT);
1446 
1447             count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
1448          }
1449 
1450          radeon_emit(PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI : PKT3_DRAW_INDIRECT_MULTI, 8,
1451                           render_cond_bit));
1452          radeon_emit(indirect->offset);
1453          radeon_emit((sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
1454          radeon_emit((sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
1455          radeon_emit(((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
1456                      S_2C3_DRAW_INDEX_ENABLE(sctx->shader.vs.cso->info.uses_drawid) |
1457                      S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
1458          radeon_emit(indirect->draw_count);
1459          radeon_emit(count_va);
1460          radeon_emit(count_va >> 32);
1461          radeon_emit(indirect->stride);
1462          radeon_emit(di_src_sel);
1463       }
1464    } else {
1465       /* Register shadowing requires that we always emit PKT3_NUM_INSTANCES. */
1466       if (sctx->shadowed_regs ||
1467           sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
1468           sctx->last_instance_count != instance_count) {
1469          radeon_emit(PKT3(PKT3_NUM_INSTANCES, 0, 0));
1470          radeon_emit(instance_count);
1471          sctx->last_instance_count = instance_count;
1472       }
1473 
1474       /* Base vertex and start instance. */
1475       int base_vertex = original_index_size ? draws[0].index_bias : draws[0].start;
1476 
1477       bool set_draw_id = !IS_DRAW_VERTEX_STATE && sctx->vs_uses_draw_id;
1478       bool set_base_instance = sctx->vs_uses_base_instance;
1479 
1480       if (!IS_DRAW_VERTEX_STATE && sctx->num_vs_blit_sgprs) {
1481          /* Re-emit draw constants after we leave u_blitter. */
1482          si_invalidate_draw_sh_constants(sctx);
1483 
1484          /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
1485          radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4, sctx->num_vs_blit_sgprs);
1486          radeon_emit_array(sctx->vs_blit_sh_data, sctx->num_vs_blit_sgprs);
1487       } else if (base_vertex != sctx->last_base_vertex ||
1488                  sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
1489                  (set_base_instance &&
1490                   (info->start_instance != sctx->last_start_instance ||
1491                    sctx->last_start_instance == SI_START_INSTANCE_UNKNOWN)) ||
1492                  (set_draw_id &&
1493                   (drawid_base != sctx->last_drawid ||
1494                    sctx->last_drawid == SI_DRAW_ID_UNKNOWN)) ||
1495                  sh_base_reg != sctx->last_sh_base_reg) {
1496          if (set_base_instance) {
1497             radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
1498             radeon_emit(base_vertex);
1499             radeon_emit(drawid_base);
1500             radeon_emit(info->start_instance);
1501 
1502             sctx->last_start_instance = info->start_instance;
1503             sctx->last_drawid = drawid_base;
1504          } else if (set_draw_id) {
1505             radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1506             radeon_emit(base_vertex);
1507             radeon_emit(drawid_base);
1508 
1509             sctx->last_drawid = drawid_base;
1510          } else {
1511             radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, base_vertex);
1512          }
1513 
1514          sctx->last_base_vertex = base_vertex;
1515          sctx->last_sh_base_reg = sh_base_reg;
1516       }
1517 
1518       /* Don't update draw_id in the following code if it doesn't increment. */
1519       bool increment_draw_id = !IS_DRAW_VERTEX_STATE && num_draws > 1 &&
1520                                set_draw_id && info->increment_draw_id;
1521 
1522       if (index_size) {
1523          /* NOT_EOP allows merging multiple draws into 1 wave, but only user VGPRs
1524           * can be changed between draws, and GS fast launch must be disabled.
1525           * NOT_EOP doesn't work on gfx9 and older.
1526           *
1527           * Instead of doing this, which evaluates the case conditions repeatedly:
1528           *  for (all draws) {
1529           *    if (case1);
1530           *    else;
1531           *  }
1532           *
1533           * Use this structuring to evaluate the case conditions once:
1534           *  if (case1) for (all draws);
1535           *  else for (all draws);
1536           *
1537           */
1538          bool index_bias_varies = !IS_DRAW_VERTEX_STATE && num_draws > 1 &&
1539                                   info->index_bias_varies;
1540 
1541          if (increment_draw_id) {
1542             if (index_bias_varies) {
1543                for (unsigned i = 0; i < num_draws; i++) {
1544                   uint64_t va = index_va + draws[i].start * index_size;
1545 
1546                   if (i > 0) {
1547                      radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1548                      radeon_emit(draws[i].index_bias);
1549                      radeon_emit(drawid_base + i);
1550                   }
1551 
1552                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1553                   radeon_emit(index_max_size);
1554                   radeon_emit(va);
1555                   radeon_emit(va >> 32);
1556                   radeon_emit(draws[i].count);
1557                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1558                }
1559                if (num_draws > 1) {
1560                   sctx->last_base_vertex = draws[num_draws - 1].index_bias;
1561                   sctx->last_drawid = drawid_base + num_draws - 1;
1562                }
1563             } else {
1564                /* Only DrawID varies. */
1565                for (unsigned i = 0; i < num_draws; i++) {
1566                   uint64_t va = index_va + draws[i].start * index_size;
1567 
1568                   if (i > 0)
1569                      radeon_set_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4, drawid_base + i);
1570 
1571                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1572                   radeon_emit(index_max_size);
1573                   radeon_emit(va);
1574                   radeon_emit(va >> 32);
1575                   radeon_emit(draws[i].count);
1576                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1577                }
1578                if (num_draws > 1)
1579                   sctx->last_drawid = drawid_base + num_draws - 1;
1580             }
1581          } else {
1582             if (index_bias_varies) {
1583                /* Only BaseVertex varies. */
1584                for (unsigned i = 0; i < num_draws; i++) {
1585                   uint64_t va = index_va + draws[i].start * index_size;
1586 
1587                   if (i > 0)
1588                      radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].index_bias);
1589 
1590                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1591                   radeon_emit(index_max_size);
1592                   radeon_emit(va);
1593                   radeon_emit(va >> 32);
1594                   radeon_emit(draws[i].count);
1595                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA); /* NOT_EOP disabled */
1596                }
1597                if (num_draws > 1)
1598                   sctx->last_base_vertex = draws[num_draws - 1].index_bias;
1599             } else {
1600                /* DrawID and BaseVertex are constant. */
1601                if (GFX_VERSION == GFX10) {
1602                   /* GFX10 has a bug that consecutive draw packets with NOT_EOP must not have
1603                    * count == 0 in the last draw (which doesn't set NOT_EOP).
1604                    *
1605                    * So remove all trailing draws with count == 0.
1606                    */
1607                   while (num_draws > 1 && !draws[num_draws - 1].count)
1608                      num_draws--;
1609                }
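               /* For example (hypothetical counts): {6, 0, 3, 0, 0} is trimmed to
                * {6, 0, 3}, so the final packet, the only one without NOT_EOP set,
                * never has count == 0. */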
1610 
1611                for (unsigned i = 0; i < num_draws; i++) {
1612                   uint64_t va = index_va + draws[i].start * index_size;
1613 
1614                   radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1615                   radeon_emit(index_max_size);
1616                   radeon_emit(va);
1617                   radeon_emit(va >> 32);
1618                   radeon_emit(draws[i].count);
1619                   radeon_emit(V_0287F0_DI_SRC_SEL_DMA |
1620                               S_0287F0_NOT_EOP(GFX_VERSION >= GFX10 && i < num_draws - 1));
1621                }
1622             }
1623          }
1624       } else {
1625          for (unsigned i = 0; i < num_draws; i++) {
1626             if (i > 0) {
1627                if (increment_draw_id) {
1628                   unsigned draw_id = drawid_base + i;
1629 
1630                   radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
1631                   radeon_emit(draws[i].start);
1632                   radeon_emit(draw_id);
1633 
1634                   sctx->last_drawid = draw_id;
1635                } else {
1636                   radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].start);
1637                }
1638             }
1639 
1640             radeon_emit(PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
1641             radeon_emit(draws[i].count);
1642             radeon_emit(V_0287F0_DI_SRC_SEL_AUTO_INDEX | use_opaque);
1643          }
1644          if (num_draws > 1 && (IS_DRAW_VERTEX_STATE || !sctx->num_vs_blit_sgprs))
1645             sctx->last_base_vertex = draws[num_draws - 1].start;
1646       }
1647    }
1648    radeon_end();
1649 
1650    EMIT_SQTT_END_DRAW;
1651 }
1652 
1653 /* Return false if not bound. */
1654 template<chip_class GFX_VERSION>
1655 static bool ALWAYS_INLINE si_set_vb_descriptor(struct si_vertex_elements *velems,
1656                                                struct pipe_vertex_buffer *vb,
1657                                                unsigned index, /* vertex element index */
1658                                                uint32_t *desc) /* where to upload descriptors */
1659 {
1660    struct si_resource *buf = si_resource(vb->buffer.resource);
1661    if (!buf) {
1662       memset(desc, 0, 16);
1663       return false;
1664    }
1665 
1666    int64_t offset = (int64_t)((int)vb->buffer_offset) + velems->src_offset[index];
1667 
1668    if (offset >= buf->b.b.width0) {
1669       assert(offset < buf->b.b.width0);
1670       memset(desc, 0, 16);
1671       return false;
1672    }
1673 
1674    uint64_t va = buf->gpu_address + offset;
1675 
1676    int64_t num_records = (int64_t)buf->b.b.width0 - offset;
1677    if (GFX_VERSION != GFX8 && vb->stride) {
1678       /* Round up by rounding down and adding 1 */
1679       num_records = (num_records - velems->format_size[index]) / vb->stride + 1;
1680    }
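   /* Example with hypothetical values: 100 bytes left in the buffer, stride 16,
    * format_size 8 -> (100 - 8) / 16 + 1 = 6 records, i.e. the last vertex may
    * start at byte 80 and still fit its 8-byte fetch. */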
1681    assert(num_records >= 0 && num_records <= UINT_MAX);
1682 
1683    uint32_t rsrc_word3 = velems->rsrc_word3[index];
1684 
1685    /* OOB_SELECT chooses the out-of-bounds check:
1686     *  - 1: index >= NUM_RECORDS (Structured)
1687     *  - 3: offset >= NUM_RECORDS (Raw)
1688     */
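   /* With the hypothetical values from the example above (stride 16, 6 records),
    * STRUCTURED rejects vertex indices >= 6. With stride == 0 the division above is
    * skipped, num_records stays in bytes, and RAW rejects byte offsets past the end. */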
1689    if (GFX_VERSION >= GFX10)
1690       rsrc_word3 |= S_008F0C_OOB_SELECT(vb->stride ? V_008F0C_OOB_SELECT_STRUCTURED
1691                                                    : V_008F0C_OOB_SELECT_RAW);
1692 
1693    desc[0] = va;
1694    desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(vb->stride);
1695    desc[2] = num_records;
1696    desc[3] = rsrc_word3;
1697    return true;
1698 }
1699 
1700 #if GFX_VER == 6 /* declare this function only once because it supports all chips. */
1701 
1702 void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex_elements *velems,
1703                                      struct pipe_vertex_buffer *vb, unsigned element_index,
1704                                      uint32_t *out)
1705 {
1706    switch (sscreen->info.chip_class) {
1707    case GFX6:
1708       si_set_vb_descriptor<GFX6>(velems, vb, element_index, out);
1709       break;
1710    case GFX7:
1711       si_set_vb_descriptor<GFX7>(velems, vb, element_index, out);
1712       break;
1713    case GFX8:
1714       si_set_vb_descriptor<GFX8>(velems, vb, element_index, out);
1715       break;
1716    case GFX9:
1717       si_set_vb_descriptor<GFX9>(velems, vb, element_index, out);
1718       break;
1719    case GFX10:
1720       si_set_vb_descriptor<GFX10>(velems, vb, element_index, out);
1721       break;
1722    case GFX10_3:
1723       si_set_vb_descriptor<GFX10_3>(velems, vb, element_index, out);
1724       break;
1725    default:
1726       unreachable("unhandled chip class");
1727    }
1728 }
1729 
1730 #endif
1731 
1732 /* util_bitcount has a large, measurable overhead (~2% difference in viewperf), so we use
1733  * the POPCNT x86 instruction via inline assembly if the CPU supports it.
1734  */
1735 enum si_has_popcnt {
1736    POPCNT_NO,
1737    POPCNT_YES,
1738 };
1739 
1740 template<si_has_popcnt POPCNT>
1741 unsigned bitcount_asm(unsigned n)
1742 {
1743    if (POPCNT == POPCNT_YES)
1744       return util_popcnt_inline_asm(n);
1745    else
1746       return util_bitcount(n);
1747 }
1748 
1749 template<si_has_popcnt POPCNT>
1750 static ALWAYS_INLINE unsigned get_next_vertex_state_elem(struct pipe_vertex_state *state,
1751                                                          uint32_t *partial_velem_mask)
1752 {
1753    unsigned semantic_index = u_bit_scan(partial_velem_mask);
1754    assert(state->input.full_velem_mask & BITFIELD_BIT(semantic_index));
1755    /* A prefix mask of the full mask gives us the index in pipe_vertex_state. */
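   /* Worked example with hypothetical masks: full_velem_mask = 0b1011 and
    * semantic_index = 3 give bitcount(0b1011 & 0b0111) = 2, so that element is
    * stored at index 2 in pipe_vertex_state. */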
1756    return bitcount_asm<POPCNT>(state->input.full_velem_mask & BITFIELD_MASK(semantic_index));
1757 }
1758 
1759 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1760           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, si_has_popcnt POPCNT> ALWAYS_INLINE
1761 static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx,
1762                                                   struct pipe_vertex_state *state,
1763                                                   uint32_t partial_velem_mask)
1764 {
1765    struct si_vertex_state *vstate = (struct si_vertex_state *)state;
1766    unsigned count = IS_DRAW_VERTEX_STATE ? bitcount_asm<POPCNT>(partial_velem_mask) :
1767                                            sctx->num_vertex_elements;
1768    unsigned sh_base = si_get_user_data_base(GFX_VERSION, HAS_TESS, HAS_GS, NGG,
1769                                             PIPE_SHADER_VERTEX);
1770    unsigned num_vbos_in_user_sgprs = si_num_vbos_in_user_sgprs_inline(GFX_VERSION);
1771    bool pointer_dirty, user_sgprs_dirty;
1772 
1773    assert(count <= SI_MAX_ATTRIBS);
1774 
1775    if (sctx->vertex_buffers_dirty || IS_DRAW_VERTEX_STATE) {
1776       assert(count);
1777 
1778       struct si_vertex_elements *velems = sctx->vertex_elements;
1779       unsigned alloc_size = IS_DRAW_VERTEX_STATE ?
1780                                vstate->velems.vb_desc_list_alloc_size :
1781                                velems->vb_desc_list_alloc_size;
1782       uint32_t *ptr;
1783 
1784       if (alloc_size) {
1785          /* Vertex buffer descriptors are the only ones which are uploaded directly
1786           * and don't go through si_upload_graphics_shader_descriptors.
1787           */
1788          u_upload_alloc(sctx->b.const_uploader, 0, alloc_size,
1789                         si_optimal_tcc_alignment(sctx, alloc_size), &sctx->vb_descriptors_offset,
1790                         (struct pipe_resource **)&sctx->vb_descriptors_buffer, (void **)&ptr);
1791          if (!sctx->vb_descriptors_buffer) {
1792             sctx->vb_descriptors_offset = 0;
1793             sctx->vb_descriptors_gpu_list = NULL;
1794             return false;
1795          }
1796 
1797          sctx->vb_descriptors_gpu_list = ptr;
1798          radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->vb_descriptors_buffer,
1799                                    RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
1800          /* GFX6 doesn't support the L2 prefetch. */
1801          if (GFX_VERSION >= GFX7)
1802             si_cp_dma_prefetch(sctx, &sctx->vb_descriptors_buffer->b.b, sctx->vb_descriptors_offset,
1803                                alloc_size);
1804       } else {
1805          si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
1806       }
1807 
1808       if (IS_DRAW_VERTEX_STATE) {
1809          unsigned i = 0;
1810 
1811          if (num_vbos_in_user_sgprs) {
1812             unsigned num_vb_sgprs = MIN2(count, num_vbos_in_user_sgprs) * 4;
1813 
1814             radeon_begin(&sctx->gfx_cs);
1815             radeon_set_sh_reg_seq(sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_vb_sgprs);
1816 
1817             for (; partial_velem_mask && i < num_vbos_in_user_sgprs; i++) {
1818                unsigned velem_index = get_next_vertex_state_elem<POPCNT>(state, &partial_velem_mask);
1819 
1820                radeon_emit_array(&vstate->descriptors[velem_index * 4], 4);
1821             }
1822             radeon_end();
1823          }
1824 
1825          for (; partial_velem_mask; i++) {
1826             unsigned velem_index = get_next_vertex_state_elem<POPCNT>(state, &partial_velem_mask);
1827             uint32_t *desc = &ptr[(i - num_vbos_in_user_sgprs) * 4];
1828 
1829             memcpy(desc, &vstate->descriptors[velem_index * 4], 16);
1830          }
1831 
1832          if (vstate->b.input.vbuffer.buffer.resource != vstate->b.input.indexbuf) {
1833             radeon_add_to_buffer_list(sctx, &sctx->gfx_cs,
1834                                       si_resource(vstate->b.input.vbuffer.buffer.resource),
1835                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1836          }
1837 
1838          /* The next draw_vbo should recompute and rebind vertex buffer descriptors. */
1839          sctx->vertex_buffers_dirty = sctx->num_vertex_elements > 0;
1840 
1841          user_sgprs_dirty = false; /* We just set them above. */
1842          pointer_dirty = count > num_vbos_in_user_sgprs;
1843       } else {
1844          unsigned first_vb_use_mask = velems->first_vb_use_mask;
1845 
1846          for (unsigned i = 0; i < count; i++) {
1847             unsigned vbo_index = velems->vertex_buffer_index[i];
1848             struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbo_index];
1849             uint32_t *desc = i < num_vbos_in_user_sgprs ? &sctx->vb_descriptor_user_sgprs[i * 4]
1850                                                         : &ptr[(i - num_vbos_in_user_sgprs) * 4];
1851 
1852             if (!si_set_vb_descriptor<GFX_VERSION>(velems, vb, i, desc))
1853                continue;
1854 
1855             if (first_vb_use_mask & (1 << i)) {
1856                radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(vb->buffer.resource),
1857                                          RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
1858             }
1859          }
1860 
1861          sctx->vertex_buffers_dirty = false;
1862          user_sgprs_dirty = num_vbos_in_user_sgprs > 0;
1863          pointer_dirty = alloc_size != 0;
1864       }
1865    } else {
1866       pointer_dirty = sctx->vertex_buffer_pointer_dirty;
1867       user_sgprs_dirty = sctx->vertex_buffer_user_sgprs_dirty;
1868    }
1869 
1870    if (pointer_dirty || user_sgprs_dirty) {
1871       struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1872       assert(count);
1873 
1874       radeon_begin(cs);
1875 
1876       /* Set the pointer to vertex buffer descriptors. */
1877       if (pointer_dirty && count > num_vbos_in_user_sgprs) {
1878          /* Find the location of the VB descriptor pointer. */
1879          unsigned sh_dw_offset = SI_VS_NUM_USER_SGPR;
1880          if (GFX_VERSION >= GFX9) {
1881             if (HAS_TESS)
1882                sh_dw_offset = GFX9_TCS_NUM_USER_SGPR;
1883             else if (HAS_GS)
1884                sh_dw_offset = GFX9_VSGS_NUM_USER_SGPR;
1885          }
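         /* On GFX9 and newer the API VS is merged into the HW TCS (LS-HS) or HW GS
          * (ES-GS) stage, so that stage's user-SGPR count decides where the pointer
          * to the descriptor list lives; otherwise the plain VS layout is used. */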
1886 
1887          radeon_set_sh_reg(sh_base + sh_dw_offset * 4,
1888                            sctx->vb_descriptors_buffer->gpu_address +
1889                            sctx->vb_descriptors_offset);
1890          sctx->vertex_buffer_pointer_dirty = false;
1891       }
1892 
1893       /* Set VB descriptors in user SGPRs. */
1894       if (user_sgprs_dirty) {
1895          assert(num_vbos_in_user_sgprs);
1896 
1897          unsigned num_sgprs = MIN2(count, num_vbos_in_user_sgprs) * 4;
1898 
1899          radeon_set_sh_reg_seq(sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_sgprs);
1900          radeon_emit_array(sctx->vb_descriptor_user_sgprs, num_sgprs);
1901          sctx->vertex_buffer_user_sgprs_dirty = false;
1902       }
1903       radeon_end();
1904    }
1905 
1906    return true;
1907 }
1908 
1909 static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_draw_info *info,
1910                                     const struct pipe_draw_indirect_info *indirect,
1911                                     const struct pipe_draw_start_count_bias *draws,
1912                                     unsigned num_draws, unsigned *start, unsigned *count)
1913 {
1914    if (indirect && !indirect->count_from_stream_output) {
1915       unsigned indirect_count;
1916       struct pipe_transfer *transfer;
1917       unsigned begin, end;
1918       unsigned map_size;
1919       unsigned *data;
1920 
1921       if (indirect->indirect_draw_count) {
1922          data = (unsigned*)
1923                 pipe_buffer_map_range(&sctx->b, indirect->indirect_draw_count,
1924                                       indirect->indirect_draw_count_offset, sizeof(unsigned),
1925                                       PIPE_MAP_READ, &transfer);
1926 
1927          indirect_count = *data;
1928 
1929          pipe_buffer_unmap(&sctx->b, transfer);
1930       } else {
1931          indirect_count = indirect->draw_count;
1932       }
1933 
1934       if (!indirect_count) {
1935          *start = *count = 0;
1936          return;
1937       }
1938 
1939       map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
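      /* Only count (data[0]) and start (data[2]) of each record are read below, so
       * the last record needs just its first 3 dwords mapped. E.g. (hypothetical
       * values) indirect_count = 4 and stride = 16 map (4 - 1) * 16 + 12 = 60 bytes. */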
1940       data = (unsigned*)
1941              pipe_buffer_map_range(&sctx->b, indirect->buffer, indirect->offset, map_size,
1942                                    PIPE_MAP_READ, &transfer);
1943 
1944       begin = UINT_MAX;
1945       end = 0;
1946 
1947       for (unsigned i = 0; i < indirect_count; ++i) {
1948          unsigned count = data[0];
1949          unsigned start = data[2];
1950 
1951          if (count > 0) {
1952             begin = MIN2(begin, start);
1953             end = MAX2(end, start + count);
1954          }
1955 
1956          data += indirect->stride / sizeof(unsigned);
1957       }
1958 
1959       pipe_buffer_unmap(&sctx->b, transfer);
1960 
1961       if (begin < end) {
1962          *start = begin;
1963          *count = end - begin;
1964       } else {
1965          *start = *count = 0;
1966       }
1967    } else {
1968       unsigned min_element = UINT_MAX;
1969       unsigned max_element = 0;
1970 
1971       for (unsigned i = 0; i < num_draws; i++) {
1972          min_element = MIN2(min_element, draws[i].start);
1973          max_element = MAX2(max_element, draws[i].start + draws[i].count);
1974       }
1975 
1976       *start = min_element;
1977       *count = max_element - min_element;
1978    }
1979 }
1980 
1981 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
1982           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
1983 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1984                                const struct pipe_draw_indirect_info *indirect,
1985                                enum pipe_prim_type prim, unsigned instance_count,
1986                                unsigned min_vertex_count, bool primitive_restart,
1987                                unsigned skip_atom_mask)
1988 {
1989    unsigned num_patches = 0;
1990 
1991    si_emit_rasterizer_prim_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx);
1992    if (HAS_TESS)
1993       si_emit_derived_tess_state(sctx, &num_patches);
1994 
1995    /* Emit state atoms. */
1996    unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1997    if (mask) {
1998       do {
1999          sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
2000       } while (mask);
2001 
2002       sctx->dirty_atoms &= skip_atom_mask;
2003    }
2004 
2005    /* Emit states. */
2006    mask = sctx->dirty_states;
2007    if (mask) {
2008       do {
2009          unsigned i = u_bit_scan(&mask);
2010          struct si_pm4_state *state = sctx->queued.array[i];
2011 
2012          /* All places should unset dirty_states if this doesn't pass. */
2013          assert(state && state != sctx->emitted.array[i]);
2014 
2015          si_pm4_emit(sctx, state);
2016          sctx->emitted.array[i] = state;
2017       } while (mask);
2018 
2019       sctx->dirty_states = 0;
2020    }
2021 
2022    /* Emit draw states. */
2023    si_emit_vs_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>(sctx, info->index_size);
2024    si_emit_draw_registers<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2025          (sctx, indirect, prim, num_patches, instance_count, primitive_restart,
2026           info->restart_index, min_vertex_count);
2027 }
2028 
2029 #define DRAW_CLEANUP do {                                 \
2030       if (index_size && indexbuf != info->index.resource) \
2031          pipe_resource_reference(&indexbuf, NULL);        \
2032    } while (0)
2033 
2034 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
2035           si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, si_has_popcnt POPCNT> ALWAYS_INLINE
2036 static void si_draw(struct pipe_context *ctx,
2037                     const struct pipe_draw_info *info,
2038                     unsigned drawid_offset,
2039                     const struct pipe_draw_indirect_info *indirect,
2040                     const struct pipe_draw_start_count_bias *draws,
2041                     unsigned num_draws,
2042                     struct pipe_vertex_state *state,
2043                     uint32_t partial_velem_mask)
2044 {
2045    /* Keep code that uses the least number of local variables as close to the beginning
2046     * of this function as possible to minimize register pressure.
2047     *
2048     * It doesn't matter where we return due to invalid parameters because such cases
2049     * shouldn't occur in practice.
2050     */
2051    struct si_context *sctx = (struct si_context *)ctx;
2052 
2053    /* Recompute and re-emit the texture resource states if needed. */
2054    unsigned dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
2055    if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
2056       sctx->last_dirty_tex_counter = dirty_tex_counter;
2057       sctx->framebuffer.dirty_cbufs |= ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
2058       sctx->framebuffer.dirty_zsbuf = true;
2059       si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
2060       si_update_all_texture_descriptors(sctx);
2061    }
2062 
2063    unsigned dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
2064    if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
2065       sctx->last_dirty_buf_counter = dirty_buf_counter;
2066       /* Rebind all buffers unconditionally. */
2067       si_rebind_buffer(sctx, NULL);
2068    }
2069 
2070    si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
2071    si_need_gfx_cs_space(sctx, num_draws);
2072 
2073    if (HAS_TESS) {
2074       struct si_shader_selector *tcs = sctx->shader.tcs.cso;
2075 
2076       /* The rarely occurring tcs == NULL case is not optimized. */
2077       bool same_patch_vertices =
2078          GFX_VERSION >= GFX9 &&
2079          tcs && sctx->patch_vertices == tcs->info.base.tess.tcs_vertices_out;
2080 
2081       if (sctx->shader.tcs.key.opt.same_patch_vertices != same_patch_vertices) {
2082          sctx->shader.tcs.key.opt.same_patch_vertices = same_patch_vertices;
2083          sctx->do_update_shaders = true;
2084       }
2085 
2086       if (GFX_VERSION == GFX9 && sctx->screen->info.has_ls_vgpr_init_bug) {
2087          /* Determine whether the LS VGPR fix should be applied.
2088           *
2089           * It is only required when num input CPs > num output CPs,
2090           * which cannot happen with the fixed function TCS. We should
2091           * also update this bit when switching from TCS to fixed
2092           * function TCS.
2093           */
2094          bool ls_vgpr_fix =
2095             tcs && sctx->patch_vertices > tcs->info.base.tess.tcs_vertices_out;
2096 
2097          if (ls_vgpr_fix != sctx->shader.tcs.key.part.tcs.ls_prolog.ls_vgpr_fix) {
2098             sctx->shader.tcs.key.part.tcs.ls_prolog.ls_vgpr_fix = ls_vgpr_fix;
2099             sctx->fixed_func_tcs_shader.key.part.tcs.ls_prolog.ls_vgpr_fix = ls_vgpr_fix;
2100             sctx->do_update_shaders = true;
2101          }
2102       }
2103    }
2104 
2105    enum pipe_prim_type prim = (enum pipe_prim_type)info->mode;
2106    unsigned instance_count = info->instance_count;
2107 
2108    /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
2109     * no workaround for indirect draws, but we can at least skip
2110     * direct draws.
2111     * 'instance_count == 0' seems to be problematic on Renoir chips (#4866),
2112     * so simplify the condition and drop these draws for all <= GFX9 chips.
2113     */
2114    if (GFX_VERSION <= GFX9 && unlikely(!IS_DRAW_VERTEX_STATE && !indirect && !instance_count))
2115       return;
2116 
2117    struct si_shader_selector *vs = sctx->shader.vs.cso;
2118    struct si_vertex_state *vstate = (struct si_vertex_state *)state;
2119    if (unlikely(!vs ||
2120                 (!IS_DRAW_VERTEX_STATE && sctx->num_vertex_elements < vs->num_vs_inputs) ||
2121                 (IS_DRAW_VERTEX_STATE && vstate->velems.count < vs->num_vs_inputs) ||
2122                 !sctx->shader.ps.cso || (HAS_TESS != (prim == PIPE_PRIM_PATCHES)))) {
2123       assert(0);
2124       return;
2125    }
2126 
2127    if (GFX_VERSION <= GFX9 && HAS_GS) {
2128       /* Determine whether the GS triangle strip adjacency fix should
2129        * be applied. Rotate every other triangle if triangle strips with
2130        * adjacency are fed to the GS. This doesn't work if primitive
2131        * restart occurs after an odd number of triangles.
2132        */
2133       bool gs_tri_strip_adj_fix =
2134          !HAS_TESS && prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
2135 
2136       if (gs_tri_strip_adj_fix != sctx->shader.gs.key.part.gs.prolog.tri_strip_adj_fix) {
2137          sctx->shader.gs.key.part.gs.prolog.tri_strip_adj_fix = gs_tri_strip_adj_fix;
2138          sctx->do_update_shaders = true;
2139       }
2140    }
2141 
2142    struct pipe_resource *indexbuf = info->index.resource;
2143    unsigned index_size = info->index_size;
2144    unsigned index_offset = indirect && indirect->buffer ? draws[0].start * index_size : 0;
2145 
2146    if (index_size) {
2147       /* Translate or upload, if needed. */
2148       /* 8-bit indices are supported on GFX8. */
2149       if (!IS_DRAW_VERTEX_STATE && GFX_VERSION <= GFX7 && index_size == 1) {
2150          unsigned start, count, start_offset, size, offset;
2151          void *ptr;
2152 
2153          si_get_draw_start_count(sctx, info, indirect, draws, num_draws, &start, &count);
2154          start_offset = start * 2;
2155          size = count * 2;
2156 
2157          indexbuf = NULL;
2158          u_upload_alloc(ctx->stream_uploader, start_offset, size,
2159                         si_optimal_tcc_alignment(sctx, size), &offset, &indexbuf, &ptr);
2160          if (unlikely(!indexbuf))
2161             return;
2162 
2163          util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0, index_offset + start, count, ptr);
2164 
2165          /* info->start will be added by the drawing code */
2166          index_offset = offset - start_offset;
2167          index_size = 2;
2168       } else if (!IS_DRAW_VERTEX_STATE && info->has_user_indices) {
2169          unsigned start_offset;
2170 
2171          assert(!indirect);
2172          assert(num_draws == 1);
2173          start_offset = draws[0].start * index_size;
2174 
2175          indexbuf = NULL;
2176          u_upload_data(ctx->stream_uploader, start_offset, draws[0].count * index_size,
2177                        sctx->screen->info.tcc_cache_line_size,
2178                        (char *)info->index.user + start_offset, &index_offset, &indexbuf);
2179          if (unlikely(!indexbuf))
2180             return;
2181 
2182          /* info->start will be added by the drawing code */
2183          index_offset -= start_offset;
2184       } else if (GFX_VERSION <= GFX7 && si_resource(indexbuf)->TC_L2_dirty) {
2185          /* GFX8 reads index buffers through TC L2, so it doesn't
2186           * need this. */
2187          sctx->flags |= SI_CONTEXT_WB_L2;
2188          si_resource(indexbuf)->TC_L2_dirty = false;
2189       }
2190    }
2191 
2192    unsigned min_direct_count = 0;
2193    unsigned total_direct_count = 0;
2194 
2195    if (!IS_DRAW_VERTEX_STATE && indirect) {
2196       /* Add the buffer size for memory checking in need_cs_space. */
2197       if (indirect->buffer)
2198          si_context_add_resource_size(sctx, indirect->buffer);
2199 
2200       /* Indirect buffers use TC L2 on GFX9, but not older hw. */
2201       if (GFX_VERSION <= GFX8) {
2202          if (indirect->buffer && si_resource(indirect->buffer)->TC_L2_dirty) {
2203             sctx->flags |= SI_CONTEXT_WB_L2;
2204             si_resource(indirect->buffer)->TC_L2_dirty = false;
2205          }
2206 
2207          if (indirect->indirect_draw_count &&
2208              si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
2209             sctx->flags |= SI_CONTEXT_WB_L2;
2210             si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
2211          }
2212       }
2213       total_direct_count = INT_MAX; /* just set something other than 0 to enable shader culling */
2214    } else {
2215       total_direct_count = min_direct_count = draws[0].count;
2216 
2217       for (unsigned i = 1; i < num_draws; i++) {
2218          unsigned count = draws[i].count;
2219 
2220          total_direct_count += count;
2221          min_direct_count = MIN2(min_direct_count, count);
2222       }
2223    }
2224 
2225    struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2226    bool primitive_restart =
2227       info->primitive_restart &&
2228       (!sctx->screen->options.prim_restart_tri_strips_only ||
2229        (prim != PIPE_PRIM_TRIANGLE_STRIP && prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
2230    unsigned original_index_size = index_size;
2231 
2232    /* Set the rasterization primitive type.
2233     *
2234     * This must be done after si_decompress_textures, which can call
2235     * draw_vbo recursively, and before si_update_shaders, which uses
2236     * current_rast_prim for this draw_vbo call.
2237     */
2238    if (!HAS_GS && !HAS_TESS) {
2239       enum pipe_prim_type rast_prim;
2240 
2241       if (util_rast_prim_is_triangles(prim)) {
2242          rast_prim = PIPE_PRIM_TRIANGLES;
2243       } else {
2244          /* Only possibilities: POINTS, LINE*, RECTANGLES */
2245          rast_prim = prim;
2246       }
2247 
2248       if (rast_prim != sctx->current_rast_prim) {
2249          if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
2250              util_prim_is_points_or_lines(rast_prim))
2251             si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
2252 
2253          sctx->current_rast_prim = rast_prim;
2254          sctx->do_update_shaders = true;
2255       }
2256    }
2257 
2258    if (IS_DRAW_VERTEX_STATE) {
2259       /* draw_vertex_state doesn't use the current vertex buffers and vertex elements,
2260        * so disable any non-trivial VS prolog that is based on them, such as vertex
2261        * format lowering.
2262        */
2263       if (!sctx->force_trivial_vs_prolog) {
2264          sctx->force_trivial_vs_prolog = true;
2265 
2266          /* Update shaders to disable the non-trivial VS prolog. */
2267          if (sctx->uses_nontrivial_vs_prolog) {
2268             si_vs_key_update_inputs(sctx);
2269             sctx->do_update_shaders = true;
2270          }
2271       }
2272    } else {
2273       if (sctx->force_trivial_vs_prolog) {
2274          sctx->force_trivial_vs_prolog = false;
2275 
2276          /* Update shaders to enable the non-trivial VS prolog. */
2277          if (sctx->uses_nontrivial_vs_prolog) {
2278             si_vs_key_update_inputs(sctx);
2279             sctx->do_update_shaders = true;
2280          }
2281       }
2282    }
2283 
2284    /* Update NGG culling settings. */
2285    uint8_t old_ngg_culling = sctx->ngg_culling;
2286    if (GFX_VERSION >= GFX10) {
2287       struct si_shader_selector *hw_vs = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->cso;
2288 
2289       if (NGG && !HAS_GS &&
2290           /* Tessellation sets ngg_cull_vert_threshold to UINT_MAX if the prim type
2291            * is not points, so this check is only needed without tessellation. */
2292           (HAS_TESS || util_rast_prim_is_lines_or_triangles(sctx->current_rast_prim)) &&
2293           /* Only the first draw for a shader starts with culling disabled and it's disabled
2294            * until we pass the total_direct_count check and then it stays enabled until
2295            * the shader is changed. This eliminates most culling on/off state changes. */
2296           (old_ngg_culling || total_direct_count > hw_vs->ngg_cull_vert_threshold)) {
2297          /* Check that the current shader allows culling. */
2298          assert(hw_vs->ngg_cull_vert_threshold != UINT_MAX);
2299 
2300          uint8_t ngg_culling = sctx->viewport0_y_inverted ? rs->ngg_cull_flags_y_inverted :
2301                                                             rs->ngg_cull_flags;
2302          assert(ngg_culling); /* rasterizer state should always set this to non-zero */
2303 
2304          if (util_prim_is_lines(sctx->current_rast_prim)) {
2305             /* Overwrite it to mask out face cull flags. */
2306             ngg_culling = SI_NGG_CULL_ENABLED | SI_NGG_CULL_LINES;
2307          }
2308 
2309          if (ngg_culling != old_ngg_culling) {
2310             /* If shader compilation is not ready, this setting will be rejected. */
2311             sctx->ngg_culling = ngg_culling;
2312             sctx->do_update_shaders = true;
2313          }
2314       } else if (old_ngg_culling) {
2315          sctx->ngg_culling = 0;
2316          sctx->do_update_shaders = true;
2317       }
2318    }
2319 
2320    if (unlikely(sctx->do_update_shaders)) {
2321       if (unlikely(!(si_update_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG>(sctx)))) {
2322          DRAW_CLEANUP;
2323          return;
2324       }
2325 
2326       /* si_update_shaders can clear the ngg_culling in the shader key if the shader compilation
2327        * hasn't finished. Set it to the correct value in si_context.
2328        */
2329       if (GFX_VERSION >= GFX10 && NGG)
2330          sctx->ngg_culling = si_get_vs_inline(sctx, HAS_TESS, HAS_GS)->current->key.opt.ngg_culling;
2331    }
2332 
2333    /* Since we've called si_context_add_resource_size for vertex buffers,
2334     * this must be called after si_need_cs_space, because we must let
2335     * need_cs_space flush before we add buffers to the buffer list.
2336     *
2337     * This must be done after si_update_shaders because si_update_shaders can
2338     * flush the CS when enabling tess and GS rings.
2339     */
2340    if (sctx->bo_list_add_all_gfx_resources)
2341       si_gfx_resources_add_all_to_bo_list(sctx);
2342 
2343    /* Graphics shader descriptors must be uploaded after si_update_shaders because
2344     * it binds tess and GS ring buffers.
2345     */
2346    if (unlikely(!si_upload_graphics_shader_descriptors(sctx))) {
2347       DRAW_CLEANUP;
2348       return;
2349    }
2350 
2351    /* Vega10/Raven scissor bug workaround. When any context register is
2352     * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
2353     * registers must be written too.
2354     */
2355    unsigned masked_atoms = 0;
2356    bool gfx9_scissor_bug = false;
2357 
2358    if (GFX_VERSION == GFX9 && sctx->screen->info.has_gfx9_scissor_bug) {
2359       masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2360       gfx9_scissor_bug = true;
2361 
2362       if ((!IS_DRAW_VERTEX_STATE && indirect && indirect->count_from_stream_output) ||
2363           sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
2364           sctx->dirty_states & si_states_that_always_roll_context())
2365          sctx->context_roll = true;
2366    }
2367 
2368    /* Use optimal packet order based on whether we need to sync the pipeline. */
2369    if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB |
2370                                SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH |
2371                                SI_CONTEXT_VS_PARTIAL_FLUSH | SI_CONTEXT_VGT_FLUSH))) {
2372       /* If we have to wait for idle, set all states first, so that all
2373        * SET packets are processed in parallel with previous draw calls.
2374        * Then draw and prefetch at the end. This ensures that the time
2375        * the CUs are idle is very short.
2376        */
2377       if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
2378          masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
2379 
2380       /* Emit all states except possibly render condition. */
2381       si_emit_all_states<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2382             (sctx, info, indirect, prim, instance_count, min_direct_count,
2383              primitive_restart, masked_atoms);
2384       sctx->emit_cache_flush(sctx, &sctx->gfx_cs);
2385       /* <-- CUs are idle here. */
2386 
2387       /* This uploads VBO descriptors, sets user SGPRs, and executes the L2 prefetch.
2388        * It should be done after cache flushing.
2389        */
2390       if (unlikely((!si_upload_and_prefetch_VB_descriptors
2391                         <GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE, POPCNT>
2392                         (sctx, state, partial_velem_mask)))) {
2393          DRAW_CLEANUP;
2394          return;
2395       }
2396 
2397       if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
2398          sctx->atoms.s.render_cond.emit(sctx);
2399          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
2400       }
2401 
2402       if (GFX_VERSION == GFX9 && gfx9_scissor_bug &&
2403           (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors))) {
2404          sctx->atoms.s.scissors.emit(sctx);
2405          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2406       }
2407       assert(sctx->dirty_atoms == 0);
2408 
2409       si_emit_draw_packets<GFX_VERSION, NGG, IS_DRAW_VERTEX_STATE>
2410             (sctx, info, drawid_offset, indirect, draws, num_draws, total_direct_count, indexbuf,
2411              index_size, index_offset, instance_count, original_index_size);
2412       /* <-- CUs are busy here. */
2413 
2414       /* Start prefetches after the draw has been started. Both will run
2415        * in parallel, but starting the draw first is more important.
2416        */
2417       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_ALL>(sctx);
2418    } else {
2419       /* If we don't wait for idle, start prefetches first, then set
2420        * states, and draw at the end.
2421        */
2422       if (sctx->flags)
2423          sctx->emit_cache_flush(sctx, &sctx->gfx_cs);
2424 
2425       /* Only prefetch the API VS and VBO descriptors. */
2426       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_BEFORE_DRAW>(sctx);
2427 
2428       /* This uploads VBO descriptors, sets user SGPRs, and executes the L2 prefetch.
2429        * It should be done after cache flushing and after the VS prefetch.
2430        */
2431       if (unlikely((!si_upload_and_prefetch_VB_descriptors
2432                        <GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE, POPCNT>
2433                        (sctx, state, partial_velem_mask)))) {
2434          DRAW_CLEANUP;
2435          return;
2436       }
2437 
2438       si_emit_all_states<GFX_VERSION, HAS_TESS, HAS_GS, NGG, IS_DRAW_VERTEX_STATE>
2439             (sctx, info, indirect, prim, instance_count, min_direct_count,
2440              primitive_restart, masked_atoms);
2441 
2442       if (GFX_VERSION == GFX9 && gfx9_scissor_bug &&
2443           (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors))) {
2444          sctx->atoms.s.scissors.emit(sctx);
2445          sctx->dirty_atoms &= ~si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2446       }
2447       assert(sctx->dirty_atoms == 0);
2448 
2449       si_emit_draw_packets<GFX_VERSION, NGG, IS_DRAW_VERTEX_STATE>
2450             (sctx, info, drawid_offset, indirect, draws, num_draws, total_direct_count, indexbuf,
2451              index_size, index_offset, instance_count, original_index_size);
2452 
2453       /* Prefetch the remaining shaders after the draw has been
2454        * started. */
2455       si_prefetch_shaders<GFX_VERSION, HAS_TESS, HAS_GS, NGG, PREFETCH_AFTER_DRAW>(sctx);
2456    }
2457 
2458    /* Clear the context roll flag after the draw call.
2459     * Only used by the gfx9 scissor bug.
2460     */
2461    if (GFX_VERSION == GFX9)
2462       sctx->context_roll = false;
2463 
2464    if (unlikely(sctx->current_saved_cs)) {
2465       si_trace_emit(sctx);
2466       si_log_draw_state(sctx, sctx->log);
2467    }
2468 
2469    /* Workaround for a VGT hang when streamout is enabled.
2470     * It must be done after drawing. */
2471    if (((GFX_VERSION == GFX7 && sctx->family == CHIP_HAWAII) ||
2472         (GFX_VERSION == GFX8 && (sctx->family == CHIP_TONGA || sctx->family == CHIP_FIJI))) &&
2473        si_get_strmout_en(sctx)) {
2474       sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
2475    }
2476 
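   /* Draws issued internally for in-place decompression are counted separately
    * from application draw calls.
    */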
2477    if (unlikely(sctx->decompression_enabled)) {
2478       sctx->num_decompress_calls++;
2479    } else {
2480       sctx->num_draw_calls += num_draws;
2481       if (primitive_restart)
2482          sctx->num_prim_restart_calls += num_draws;
2483    }
2484 
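   /* The draw may have written depth, so the bound depth level can no longer
    * be assumed to be in a cleared state.
    */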
2485    if (sctx->framebuffer.state.zsbuf) {
2486       struct si_texture *zstex = (struct si_texture *)sctx->framebuffer.state.zsbuf->texture;
2487       zstex->depth_cleared_level_mask &= ~BITFIELD_BIT(sctx->framebuffer.state.zsbuf->u.tex.level);
2488    }
2489 
2490    DRAW_CLEANUP;
2491 }
2492 
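/* The pipe_context::draw_vbo entry point: a thin wrapper that forwards to the
 * templated si_draw path with vertex-state drawing disabled.
 */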
2493 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
2494 static void si_draw_vbo(struct pipe_context *ctx,
2495                         const struct pipe_draw_info *info,
2496                         unsigned drawid_offset,
2497                         const struct pipe_draw_indirect_info *indirect,
2498                         const struct pipe_draw_start_count_bias *draws,
2499                         unsigned num_draws)
2500 {
2501    si_draw<GFX_VERSION, HAS_TESS, HAS_GS, NGG, DRAW_VERTEX_STATE_OFF, POPCNT_NO>
2502       (ctx, info, drawid_offset, indirect, draws, num_draws, NULL, 0);
2503 }
2504 
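/* The pipe_context::draw_vertex_state entry point: translates the cached vertex
 * state into a pipe_draw_info and forwards it to the common si_draw path.
 */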
2505 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
2506           si_has_popcnt POPCNT>
2507 static void si_draw_vertex_state(struct pipe_context *ctx,
2508                                  struct pipe_vertex_state *vstate,
2509                                  uint32_t partial_velem_mask,
2510                                  struct pipe_draw_vertex_state_info info,
2511                                  const struct pipe_draw_start_count_bias *draws,
2512                                  unsigned num_draws)
2513 {
2514    struct si_vertex_state *state = (struct si_vertex_state *)vstate;
2515    struct pipe_draw_info dinfo = {};
2516 
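   /* Build a minimal draw info: one instance, drawn with the 32-bit index
    * buffer stored in the vertex state object.
    */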
2517    dinfo.mode = info.mode;
2518    dinfo.index_size = 4;
2519    dinfo.instance_count = 1;
2520    dinfo.index.resource = state->b.input.indexbuf;
2521 
2522    si_draw<GFX_VERSION, HAS_TESS, HAS_GS, NGG, DRAW_VERTEX_STATE_ON, POPCNT>
2523       (ctx, &dinfo, 0, NULL, draws, num_draws, vstate, partial_velem_mask);
2524 
2525    if (info.take_vertex_state_ownership)
2526       pipe_vertex_state_reference(&vstate, NULL);
2527 }
2528 
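/* util_blitter callback: packs the rectangle corners, depth, and the optional
 * color/texcoord attribute into VS user SGPR data, binds the blit vertex
 * shader, and draws a single 3-vertex rectangle list.
 */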
2529 static void si_draw_rectangle(struct blitter_context *blitter, void *vertex_elements_cso,
2530                               blitter_get_vs_func get_vs, int x1, int y1, int x2, int y2,
2531                               float depth, unsigned num_instances, enum blitter_attrib_type type,
2532                               const union blitter_attrib *attrib)
2533 {
2534    struct pipe_context *pipe = util_blitter_get_pipe(blitter);
2535    struct si_context *sctx = (struct si_context *)pipe;
2536 
2537    /* Pack position coordinates as signed int16. */
2538    sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) | ((uint32_t)(y1 & 0xffff) << 16);
2539    sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(y2 & 0xffff) << 16);
2540    sctx->vs_blit_sh_data[2] = fui(depth);
2541 
2542    switch (type) {
2543    case UTIL_BLITTER_ATTRIB_COLOR:
2544       memcpy(&sctx->vs_blit_sh_data[3], attrib->color, sizeof(float) * 4);
2545       break;
2546    case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
2547    case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
2548       memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord, sizeof(attrib->texcoord));
2549       break;
2550    case UTIL_BLITTER_ATTRIB_NONE:;
2551    }
2552 
2553    pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
2554 
2555    struct pipe_draw_info info = {};
2556    struct pipe_draw_start_count_bias draw;
2557 
2558    info.mode = SI_PRIM_RECTANGLE_LIST;
2559    info.instance_count = num_instances;
2560 
2561    draw.start = 0;
2562    draw.count = 3;
2563 
2564    /* Don't set per-stage shader pointers for VS. */
2565    sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
2566    sctx->vertex_buffer_pointer_dirty = false;
2567    sctx->vertex_buffer_user_sgprs_dirty = false;
2568 
2569    pipe->draw_vbo(pipe, &info, 0, NULL, &draw, 1);
2570 }
2571 
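/* Install the draw_vbo/draw_vertex_state variants for one TESS/GS/NGG
 * combination, picking the POPCNT variant based on CPU capabilities.
 * NGG requires GFX10+, so invalid combinations are skipped.
 */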
2572 template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
2573 static void si_init_draw_vbo(struct si_context *sctx)
2574 {
2575    if (NGG && GFX_VERSION < GFX10)
2576       return;
2577 
2578    sctx->draw_vbo[HAS_TESS][HAS_GS][NGG] =
2579       si_draw_vbo<GFX_VERSION, HAS_TESS, HAS_GS, NGG>;
2580 
2581    if (util_get_cpu_caps()->has_popcnt) {
2582       sctx->draw_vertex_state[HAS_TESS][HAS_GS][NGG] =
2583          si_draw_vertex_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, POPCNT_YES>;
2584    } else {
2585       sctx->draw_vertex_state[HAS_TESS][HAS_GS][NGG] =
2586          si_draw_vertex_state<GFX_VERSION, HAS_TESS, HAS_GS, NGG, POPCNT_NO>;
2587    }
2588 }
2589 
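/* Instantiate draw functions for all 8 TESS x GS x NGG pipeline combinations. */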
2590 template <chip_class GFX_VERSION>
2591 static void si_init_draw_vbo_all_pipeline_options(struct si_context *sctx)
2592 {
2593    si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_OFF, NGG_OFF>(sctx);
2594    si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_ON,  NGG_OFF>(sctx);
2595    si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_OFF, NGG_OFF>(sctx);
2596    si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_ON,  NGG_OFF>(sctx);
2597    si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_OFF, NGG_ON>(sctx);
2598    si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_ON,  NGG_ON>(sctx);
2599    si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_OFF, NGG_ON>(sctx);
2600    si_init_draw_vbo<GFX_VERSION, TESS_ON,  GS_ON,  NGG_ON>(sctx);
2601 }
2602 
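/* Stub callbacks bound while no vertex shader is set (see the comment in
 * GFX(si_init_draw_functions_) below).
 */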
2603 static void si_invalid_draw_vbo(struct pipe_context *pipe,
2604                                 const struct pipe_draw_info *info,
2605                                 unsigned drawid_offset,
2606                                 const struct pipe_draw_indirect_info *indirect,
2607                                 const struct pipe_draw_start_count_bias *draws,
2608                                 unsigned num_draws)
2609 {
2610    unreachable("vertex shader not bound");
2611 }
2612 
2613 static void si_invalid_draw_vertex_state(struct pipe_context *ctx,
2614                                          struct pipe_vertex_state *vstate,
2615                                          uint32_t partial_velem_mask,
2616                                          struct pipe_draw_vertex_state_info info,
2617                                          const struct pipe_draw_start_count_bias *draws,
2618                                          unsigned num_draws)
2619 {
2620    unreachable("vertex shader not bound");
2621 }
2622 
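/* Per-gfx-version entry point; the GFX() macro mangles the name so each
 * compiled variant of this file exports a distinct symbol.
 */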
2623 extern "C"
2624 void GFX(si_init_draw_functions_)(struct si_context *sctx)
2625 {
2626    assert(sctx->chip_class == GFX());
2627 
2628    si_init_draw_vbo_all_pipeline_options<GFX()>(sctx);
2629 
2630    /* Bind stub draw callbacks so that draw_vbo isn't NULL; a NULL draw_vbo would
2631     * skip initialization of callbacks in upper layers (such as u_threaded_context).
2632     */
2633    sctx->b.draw_vbo = si_invalid_draw_vbo;
2634    sctx->b.draw_vertex_state = si_invalid_draw_vertex_state;
2635    sctx->blitter->draw_rectangle = si_draw_rectangle;
2636 
2637    si_init_ia_multi_vgt_param_table(sctx);
2638 }
2639 
2640 #if GFX_VER == 6 /* declare this function only once because it supports all chips. */
2641 
2642 extern "C"
2643 void si_init_spi_map_functions(struct si_context *sctx)
2644 {
2645    /* This unrolls the loops in si_emit_spi_map and inlines memcmp and memcpys.
2646     * It improves performance for viewperf/snx.
2647     */
2648    sctx->emit_spi_map[0] = si_emit_spi_map<0>;
2649    sctx->emit_spi_map[1] = si_emit_spi_map<1>;
2650    sctx->emit_spi_map[2] = si_emit_spi_map<2>;
2651    sctx->emit_spi_map[3] = si_emit_spi_map<3>;
2652    sctx->emit_spi_map[4] = si_emit_spi_map<4>;
2653    sctx->emit_spi_map[5] = si_emit_spi_map<5>;
2654    sctx->emit_spi_map[6] = si_emit_spi_map<6>;
2655    sctx->emit_spi_map[7] = si_emit_spi_map<7>;
2656    sctx->emit_spi_map[8] = si_emit_spi_map<8>;
2657    sctx->emit_spi_map[9] = si_emit_spi_map<9>;
2658    sctx->emit_spi_map[10] = si_emit_spi_map<10>;
2659    sctx->emit_spi_map[11] = si_emit_spi_map<11>;
2660    sctx->emit_spi_map[12] = si_emit_spi_map<12>;
2661    sctx->emit_spi_map[13] = si_emit_spi_map<13>;
2662    sctx->emit_spi_map[14] = si_emit_spi_map<14>;
2663    sctx->emit_spi_map[15] = si_emit_spi_map<15>;
2664    sctx->emit_spi_map[16] = si_emit_spi_map<16>;
2665    sctx->emit_spi_map[17] = si_emit_spi_map<17>;
2666    sctx->emit_spi_map[18] = si_emit_spi_map<18>;
2667    sctx->emit_spi_map[19] = si_emit_spi_map<19>;
2668    sctx->emit_spi_map[20] = si_emit_spi_map<20>;
2669    sctx->emit_spi_map[21] = si_emit_spi_map<21>;
2670    sctx->emit_spi_map[22] = si_emit_spi_map<22>;
2671    sctx->emit_spi_map[23] = si_emit_spi_map<23>;
2672    sctx->emit_spi_map[24] = si_emit_spi_map<24>;
2673    sctx->emit_spi_map[25] = si_emit_spi_map<25>;
2674    sctx->emit_spi_map[26] = si_emit_spi_map<26>;
2675    sctx->emit_spi_map[27] = si_emit_spi_map<27>;
2676    sctx->emit_spi_map[28] = si_emit_spi_map<28>;
2677    sctx->emit_spi_map[29] = si_emit_spi_map<29>;
2678    sctx->emit_spi_map[30] = si_emit_spi_map<30>;
2679    sctx->emit_spi_map[31] = si_emit_spi_map<31>;
2680    sctx->emit_spi_map[32] = si_emit_spi_map<32>;
2681 }
2682 
2683 #endif
2684