1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "ac_nir.h"
26 #include "ac_shader_util.h"
27 #include "compiler/nir/nir_serialize.h"
28 #include "nir/tgsi_to_nir.h"
29 #include "si_build_pm4.h"
30 #include "sid.h"
31 #include "util/crc32.h"
32 #include "util/disk_cache.h"
33 #include "util/hash_table.h"
34 #include "util/mesa-sha1.h"
35 #include "util/u_async_debug.h"
36 #include "util/u_memory.h"
37 #include "util/u_prim.h"
38 #include "tgsi/tgsi_from_mesa.h"
39
si_determine_wave_size(struct si_screen * sscreen,struct si_shader * shader)40 unsigned si_determine_wave_size(struct si_screen *sscreen, struct si_shader *shader)
41 {
42 /* There are a few uses that pass shader=NULL here, expecting the default compute wave size. */
43 struct si_shader_info *info = shader ? &shader->selector->info : NULL;
44 gl_shader_stage stage = info ? info->stage : MESA_SHADER_COMPUTE;
45
46 if (sscreen->info.chip_class < GFX10)
47 return 64;
48
49 /* Legacy GS only supports Wave64. */
50 if ((stage == MESA_SHADER_VERTEX && shader->key.ge.as_es && !shader->key.ge.as_ngg) ||
51 (stage == MESA_SHADER_TESS_EVAL && shader->key.ge.as_es && !shader->key.ge.as_ngg) ||
52 (stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg))
53 return 64;
54
55 /* Small workgroups use Wave32 unconditionally. */
56 if (stage == MESA_SHADER_COMPUTE && info &&
57 !info->base.workgroup_size_variable &&
58 info->base.workgroup_size[0] *
59 info->base.workgroup_size[1] *
60 info->base.workgroup_size[2] <= 32)
61 return 32;
62
63 /* Debug flags. */
64 unsigned dbg_wave_size = 0;
65 if (sscreen->debug_flags &
66 (stage == MESA_SHADER_COMPUTE ? DBG(W32_CS) :
67 stage == MESA_SHADER_FRAGMENT ? DBG(W32_PS) | DBG(W32_PS_DISCARD) : DBG(W32_GE)))
68 dbg_wave_size = 32;
69
70 if (sscreen->debug_flags &
71 (stage == MESA_SHADER_COMPUTE ? DBG(W64_CS) :
72 stage == MESA_SHADER_FRAGMENT ? DBG(W64_PS) : DBG(W64_GE))) {
73 assert(!dbg_wave_size);
74 dbg_wave_size = 64;
75 }
76
77 /* Shader profiles. */
78 unsigned profile_wave_size = 0;
79 if (info && info->options & SI_PROFILE_WAVE32)
80 profile_wave_size = 32;
81
82 if (info && info->options & SI_PROFILE_WAVE64) {
83 assert(!profile_wave_size);
84 profile_wave_size = 64;
85 }
86
87 if (profile_wave_size) {
88 /* Only debug flags override shader profiles. */
89 if (dbg_wave_size)
90 return dbg_wave_size;
91
92 return profile_wave_size;
93 }
94
95 /* LLVM 13 and 14 have a bug that causes compile failures with discard in Wave32
96 * in some cases. Alpha test in Wave32 is luckily unaffected.
97 */
98 if (stage == MESA_SHADER_FRAGMENT && info->base.fs.uses_discard &&
99 !(info && info->options & SI_PROFILE_IGNORE_LLVM13_DISCARD_BUG) &&
100 LLVM_VERSION_MAJOR == 13 && !(sscreen->debug_flags & DBG(W32_PS_DISCARD)))
101 return 64;
102
103 /* Debug flags except w32psdiscard don't override the discard bug workaround,
104 * but they override everything else.
105 */
106 if (dbg_wave_size)
107 return dbg_wave_size;
108
109 /* Pixel shaders without interp instructions don't suffer from reduced interpolation
110 * performance in Wave32, so use Wave32. This helps Piano and Voloplosion.
111 */
112 if (stage == MESA_SHADER_FRAGMENT && !info->num_inputs)
113 return 32;
114
115 /* There are a few very rare cases where VS is better with Wave32, and there are no known
116 * cases where Wave64 is better.
117 */
118 if (stage <= MESA_SHADER_GEOMETRY)
119 return 32;
120
121 /* TODO: Merged shaders must use the same wave size because the driver doesn't recompile
122 * individual shaders of merged shaders to match the wave size between them.
123 */
124 bool merged_shader = shader && !shader->is_gs_copy_shader &&
125 (shader->key.ge.as_ls || shader->key.ge.as_es ||
126 stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_GEOMETRY);
127
128 /* Divergent loops in Wave64 can end up having too many iterations in one half of the wave
129 * while the other half is idling but occupying VGPRs, preventing other waves from launching.
130 * Wave32 eliminates the idling half to allow the next wave to start.
131 */
132 if (!merged_shader && info && info->has_divergent_loop)
133 return 32;
134
135 return 64;
136 }
137
138 /* SHADER_CACHE */
139
140 /**
141 * Return the IR key for the shader cache.
142 */
si_get_ir_cache_key(struct si_shader_selector * sel,bool ngg,bool es,unsigned wave_size,unsigned char ir_sha1_cache_key[20])143 void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
144 unsigned wave_size, unsigned char ir_sha1_cache_key[20])
145 {
146 struct blob blob = {};
147 unsigned ir_size;
148 void *ir_binary;
149
150 if (sel->nir_binary) {
151 ir_binary = sel->nir_binary;
152 ir_size = sel->nir_size;
153 } else {
154 assert(sel->nir);
155
156 blob_init(&blob);
157 nir_serialize(&blob, sel->nir, true);
158 ir_binary = blob.data;
159 ir_size = blob.size;
160 }
161
162 /* These settings affect the compilation, but they are not derived
163 * from the input shader IR.
164 */
165 unsigned shader_variant_flags = 0;
166
167 if (ngg)
168 shader_variant_flags |= 1 << 0;
169 if (sel->nir)
170 shader_variant_flags |= 1 << 1;
171 if (wave_size == 32)
172 shader_variant_flags |= 1 << 2;
173 if (sel->info.stage == MESA_SHADER_FRAGMENT &&
174 /* Derivatives imply helper invocations so check for needs_quad_helper_invocations. */
175 sel->info.base.fs.needs_quad_helper_invocations &&
176 sel->info.base.fs.uses_discard &&
177 sel->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
178 shader_variant_flags |= 1 << 3;
179 /* use_ngg_culling disables NGG passthrough for non-culling shaders to reduce context
180 * rolls, which can be changed with AMD_DEBUG=nonggc or AMD_DEBUG=nggc.
181 */
182 if (sel->screen->use_ngg_culling)
183 shader_variant_flags |= 1 << 4;
184 if (sel->screen->record_llvm_ir)
185 shader_variant_flags |= 1 << 5;
186
187 /* bit gap */
188
189 if (sel->screen->options.no_infinite_interp)
190 shader_variant_flags |= 1 << 7;
191 if (sel->screen->options.clamp_div_by_zero)
192 shader_variant_flags |= 1 << 8;
193 if (sel->screen->debug_flags & DBG(GISEL))
194 shader_variant_flags |= 1 << 9;
195 if ((sel->info.stage == MESA_SHADER_VERTEX ||
196 sel->info.stage == MESA_SHADER_TESS_EVAL ||
197 sel->info.stage == MESA_SHADER_GEOMETRY) &&
198 !es &&
199 sel->screen->options.vrs2x2)
200 shader_variant_flags |= 1 << 10;
201 if (sel->screen->options.inline_uniforms)
202 shader_variant_flags |= 1 << 11;
203
204 struct mesa_sha1 ctx;
205 _mesa_sha1_init(&ctx);
206 _mesa_sha1_update(&ctx, &shader_variant_flags, 4);
207 _mesa_sha1_update(&ctx, ir_binary, ir_size);
208 if (sel->info.stage == MESA_SHADER_VERTEX || sel->info.stage == MESA_SHADER_TESS_EVAL ||
209 sel->info.stage == MESA_SHADER_GEOMETRY)
210 _mesa_sha1_update(&ctx, &sel->so, sizeof(sel->so));
211 _mesa_sha1_final(&ctx, ir_sha1_cache_key);
212
213 if (ir_binary == blob.data)
214 blob_finish(&blob);
215 }
216
217 /** Copy "data" to "ptr" and return the next dword following copied data. */
write_data(uint32_t * ptr,const void * data,unsigned size)218 static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
219 {
220 /* data may be NULL if size == 0 */
221 if (size)
222 memcpy(ptr, data, size);
223 ptr += DIV_ROUND_UP(size, 4);
224 return ptr;
225 }
226
227 /** Read data from "ptr". Return the next dword following the data. */
read_data(uint32_t * ptr,void * data,unsigned size)228 static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
229 {
230 memcpy(data, ptr, size);
231 ptr += DIV_ROUND_UP(size, 4);
232 return ptr;
233 }
234
235 /**
236 * Write the size as uint followed by the data. Return the next dword
237 * following the copied data.
238 */
write_chunk(uint32_t * ptr,const void * data,unsigned size)239 static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
240 {
241 *ptr++ = size;
242 return write_data(ptr, data, size);
243 }
244
245 /**
246 * Read the size as uint followed by the data. Return both via parameters.
247 * Return the next dword following the data.
248 */
read_chunk(uint32_t * ptr,void ** data,unsigned * size)249 static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
250 {
251 *size = *ptr++;
252 assert(*data == NULL);
253 if (!*size)
254 return ptr;
255 *data = malloc(*size);
256 return read_data(ptr, *data, *size);
257 }
258
259 /**
260 * Return the shader binary in a buffer. The first 4 bytes contain its size
261 * as integer.
262 */
si_get_shader_binary(struct si_shader * shader)263 static uint32_t *si_get_shader_binary(struct si_shader *shader)
264 {
265 /* There is always a size of data followed by the data itself. */
266 unsigned llvm_ir_size =
267 shader->binary.llvm_ir_string ? strlen(shader->binary.llvm_ir_string) + 1 : 0;
268
269 /* Refuse to allocate overly large buffers and guard against integer
270 * overflow. */
271 if (shader->binary.elf_size > UINT_MAX / 4 || llvm_ir_size > UINT_MAX / 4)
272 return NULL;
273
274 unsigned size = 4 + /* total size */
275 4 + /* CRC32 of the data below */
276 align(sizeof(shader->config), 4) + align(sizeof(shader->info), 4) + 4 +
277 align(shader->binary.elf_size, 4) + 4 + align(llvm_ir_size, 4);
278 uint32_t *buffer = (uint32_t*)CALLOC(1, size);
279 uint32_t *ptr = buffer;
280
281 if (!buffer)
282 return NULL;
283
284 *ptr++ = size;
285 ptr++; /* CRC32 is calculated at the end. */
286
287 ptr = write_data(ptr, &shader->config, sizeof(shader->config));
288 ptr = write_data(ptr, &shader->info, sizeof(shader->info));
289 ptr = write_chunk(ptr, shader->binary.elf_buffer, shader->binary.elf_size);
290 ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
291 assert((char *)ptr - (char *)buffer == (ptrdiff_t)size);
292
293 /* Compute CRC32. */
294 ptr = buffer;
295 ptr++;
296 *ptr = util_hash_crc32(ptr + 1, size - 8);
297
298 return buffer;
299 }
300
si_load_shader_binary(struct si_shader * shader,void * binary)301 static bool si_load_shader_binary(struct si_shader *shader, void *binary)
302 {
303 uint32_t *ptr = (uint32_t *)binary;
304 uint32_t size = *ptr++;
305 uint32_t crc32 = *ptr++;
306 unsigned chunk_size;
307 unsigned elf_size;
308
309 if (util_hash_crc32(ptr, size - 8) != crc32) {
310 fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
311 return false;
312 }
313
314 ptr = read_data(ptr, &shader->config, sizeof(shader->config));
315 ptr = read_data(ptr, &shader->info, sizeof(shader->info));
316 ptr = read_chunk(ptr, (void **)&shader->binary.elf_buffer, &elf_size);
317 shader->binary.elf_size = elf_size;
318 ptr = read_chunk(ptr, (void **)&shader->binary.llvm_ir_string, &chunk_size);
319
320 if (!shader->is_gs_copy_shader &&
321 shader->selector->info.stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg) {
322 shader->gs_copy_shader = CALLOC_STRUCT(si_shader);
323 if (!shader->gs_copy_shader)
324 return false;
325
326 shader->gs_copy_shader->is_gs_copy_shader = true;
327
328 if (!si_load_shader_binary(shader->gs_copy_shader, (uint8_t*)binary + size)) {
329 FREE(shader->gs_copy_shader);
330 shader->gs_copy_shader = NULL;
331 return false;
332 }
333
334 util_queue_fence_init(&shader->gs_copy_shader->ready);
335 shader->gs_copy_shader->selector = shader->selector;
336 shader->gs_copy_shader->is_gs_copy_shader = true;
337 shader->gs_copy_shader->wave_size =
338 si_determine_wave_size(shader->selector->screen, shader->gs_copy_shader);
339
340 si_shader_binary_upload(shader->selector->screen, shader->gs_copy_shader, 0);
341 }
342
343 return true;
344 }
345
346 /**
347 * Insert a shader into the cache. It's assumed the shader is not in the cache.
348 * Use si_shader_cache_load_shader before calling this.
349 */
si_shader_cache_insert_shader(struct si_screen * sscreen,unsigned char ir_sha1_cache_key[20],struct si_shader * shader,bool insert_into_disk_cache)350 void si_shader_cache_insert_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
351 struct si_shader *shader, bool insert_into_disk_cache)
352 {
353 uint32_t *hw_binary;
354 struct hash_entry *entry;
355 uint8_t key[CACHE_KEY_SIZE];
356 bool memory_cache_full = sscreen->shader_cache_size >= sscreen->shader_cache_max_size;
357
358 if (!insert_into_disk_cache && memory_cache_full)
359 return;
360
361 entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
362 if (entry)
363 return; /* already added */
364
365 hw_binary = si_get_shader_binary(shader);
366 if (!hw_binary)
367 return;
368
369 unsigned size = *hw_binary;
370
371 if (shader->selector->info.stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg) {
372 uint32_t *gs_copy_binary = si_get_shader_binary(shader->gs_copy_shader);
373 if (!gs_copy_binary) {
374 FREE(hw_binary);
375 return;
376 }
377
378 /* Combine both binaries. */
379 size += *gs_copy_binary;
380 uint32_t *combined_binary = (uint32_t*)MALLOC(size);
381 if (!combined_binary) {
382 FREE(hw_binary);
383 FREE(gs_copy_binary);
384 return;
385 }
386
387 memcpy(combined_binary, hw_binary, *hw_binary);
388 memcpy(combined_binary + *hw_binary / 4, gs_copy_binary, *gs_copy_binary);
389 FREE(hw_binary);
390 FREE(gs_copy_binary);
391 hw_binary = combined_binary;
392 }
393
394 if (!memory_cache_full) {
395 if (_mesa_hash_table_insert(sscreen->shader_cache,
396 mem_dup(ir_sha1_cache_key, 20),
397 hw_binary) == NULL) {
398 FREE(hw_binary);
399 return;
400 }
401
402 sscreen->shader_cache_size += size;
403 }
404
405 if (sscreen->disk_shader_cache && insert_into_disk_cache) {
406 disk_cache_compute_key(sscreen->disk_shader_cache, ir_sha1_cache_key, 20, key);
407 disk_cache_put(sscreen->disk_shader_cache, key, hw_binary, size, NULL);
408 }
409
410 if (memory_cache_full)
411 FREE(hw_binary);
412 }
413
si_shader_cache_load_shader(struct si_screen * sscreen,unsigned char ir_sha1_cache_key[20],struct si_shader * shader)414 bool si_shader_cache_load_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
415 struct si_shader *shader)
416 {
417 struct hash_entry *entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
418
419 if (entry) {
420 if (si_load_shader_binary(shader, entry->data)) {
421 p_atomic_inc(&sscreen->num_memory_shader_cache_hits);
422 return true;
423 }
424 }
425 p_atomic_inc(&sscreen->num_memory_shader_cache_misses);
426
427 if (!sscreen->disk_shader_cache)
428 return false;
429
430 unsigned char sha1[CACHE_KEY_SIZE];
431 disk_cache_compute_key(sscreen->disk_shader_cache, ir_sha1_cache_key, 20, sha1);
432
433 size_t total_size;
434 uint32_t *buffer = (uint32_t*)disk_cache_get(sscreen->disk_shader_cache, sha1, &total_size);
435 if (buffer) {
436 unsigned size = *buffer;
437 unsigned gs_copy_binary_size = 0;
438
439 /* The GS copy shader binary is after the GS binary. */
440 if (shader->selector->info.stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg)
441 gs_copy_binary_size = buffer[size / 4];
442
443 if (total_size >= sizeof(uint32_t) && size + gs_copy_binary_size == total_size) {
444 if (si_load_shader_binary(shader, buffer)) {
445 free(buffer);
446 si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, shader, false);
447 p_atomic_inc(&sscreen->num_disk_shader_cache_hits);
448 return true;
449 }
450 } else {
451 /* Something has gone wrong discard the item from the cache and
452 * rebuild/link from source.
453 */
454 assert(!"Invalid radeonsi shader disk cache item!");
455 disk_cache_remove(sscreen->disk_shader_cache, sha1);
456 }
457 }
458
459 free(buffer);
460 p_atomic_inc(&sscreen->num_disk_shader_cache_misses);
461 return false;
462 }
463
si_shader_cache_key_hash(const void * key)464 static uint32_t si_shader_cache_key_hash(const void *key)
465 {
466 /* Take the first dword of SHA1. */
467 return *(uint32_t *)key;
468 }
469
si_shader_cache_key_equals(const void * a,const void * b)470 static bool si_shader_cache_key_equals(const void *a, const void *b)
471 {
472 /* Compare SHA1s. */
473 return memcmp(a, b, 20) == 0;
474 }
475
si_destroy_shader_cache_entry(struct hash_entry * entry)476 static void si_destroy_shader_cache_entry(struct hash_entry *entry)
477 {
478 FREE((void *)entry->key);
479 FREE(entry->data);
480 }
481
si_init_shader_cache(struct si_screen * sscreen)482 bool si_init_shader_cache(struct si_screen *sscreen)
483 {
484 (void)simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
485 sscreen->shader_cache =
486 _mesa_hash_table_create(NULL, si_shader_cache_key_hash, si_shader_cache_key_equals);
487 sscreen->shader_cache_size = 0;
488 /* Maximum size: 64MB on 32 bits, 1GB else */
489 sscreen->shader_cache_max_size = ((sizeof(void *) == 4) ? 64 : 1024) * 1024 * 1024;
490
491 return sscreen->shader_cache != NULL;
492 }
493
si_destroy_shader_cache(struct si_screen * sscreen)494 void si_destroy_shader_cache(struct si_screen *sscreen)
495 {
496 if (sscreen->shader_cache)
497 _mesa_hash_table_destroy(sscreen->shader_cache, si_destroy_shader_cache_entry);
498 simple_mtx_destroy(&sscreen->shader_cache_mutex);
499 }
500
501 /* SHADER STATES */
502
si_shader_mem_ordered(struct si_shader * shader)503 bool si_shader_mem_ordered(struct si_shader *shader)
504 {
505 if (shader->selector->screen->info.chip_class < GFX10)
506 return false;
507
508 /* Return true if both types of VMEM that return something are used. */
509 return shader->info.uses_vmem_sampler_or_bvh &&
510 (shader->info.uses_vmem_load_other ||
511 shader->config.scratch_bytes_per_wave);
512 }
513
si_set_tesseval_regs(struct si_screen * sscreen,const struct si_shader_selector * tes,struct si_shader * shader)514 static void si_set_tesseval_regs(struct si_screen *sscreen, const struct si_shader_selector *tes,
515 struct si_shader *shader)
516 {
517 const struct si_shader_info *info = &tes->info;
518 enum tess_primitive_mode tes_prim_mode = info->base.tess._primitive_mode;
519 unsigned tes_spacing = info->base.tess.spacing;
520 bool tes_vertex_order_cw = !info->base.tess.ccw;
521 bool tes_point_mode = info->base.tess.point_mode;
522 unsigned type, partitioning, topology, distribution_mode;
523
524 switch (tes_prim_mode) {
525 case TESS_PRIMITIVE_ISOLINES:
526 type = V_028B6C_TESS_ISOLINE;
527 break;
528 case TESS_PRIMITIVE_TRIANGLES:
529 type = V_028B6C_TESS_TRIANGLE;
530 break;
531 case TESS_PRIMITIVE_QUADS:
532 type = V_028B6C_TESS_QUAD;
533 break;
534 default:
535 assert(0);
536 return;
537 }
538
539 switch (tes_spacing) {
540 case TESS_SPACING_FRACTIONAL_ODD:
541 partitioning = V_028B6C_PART_FRAC_ODD;
542 break;
543 case TESS_SPACING_FRACTIONAL_EVEN:
544 partitioning = V_028B6C_PART_FRAC_EVEN;
545 break;
546 case TESS_SPACING_EQUAL:
547 partitioning = V_028B6C_PART_INTEGER;
548 break;
549 default:
550 assert(0);
551 return;
552 }
553
554 if (tes_point_mode)
555 topology = V_028B6C_OUTPUT_POINT;
556 else if (tes_prim_mode == TESS_PRIMITIVE_ISOLINES)
557 topology = V_028B6C_OUTPUT_LINE;
558 else if (tes_vertex_order_cw)
559 /* for some reason, this must be the other way around */
560 topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
561 else
562 topology = V_028B6C_OUTPUT_TRIANGLE_CW;
563
564 if (sscreen->info.has_distributed_tess) {
565 if (sscreen->info.family == CHIP_FIJI || sscreen->info.family >= CHIP_POLARIS10)
566 distribution_mode = V_028B6C_TRAPEZOIDS;
567 else
568 distribution_mode = V_028B6C_DONUTS;
569 } else
570 distribution_mode = V_028B6C_NO_DIST;
571
572 shader->vgt_tf_param = S_028B6C_TYPE(type) | S_028B6C_PARTITIONING(partitioning) |
573 S_028B6C_TOPOLOGY(topology) |
574 S_028B6C_DISTRIBUTION_MODE(distribution_mode);
575 }
576
577 /* Polaris needs different VTX_REUSE_DEPTH settings depending on
578 * whether the "fractional odd" tessellation spacing is used.
579 *
580 * Possible VGT configurations and which state should set the register:
581 *
582 * Reg set in | VGT shader configuration | Value
583 * ------------------------------------------------------
584 * VS as VS | VS | 30
585 * VS as ES | ES -> GS -> VS | 30
586 * TES as VS | LS -> HS -> VS | 14 or 30
587 * TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
588 */
polaris_set_vgt_vertex_reuse(struct si_screen * sscreen,struct si_shader_selector * sel,struct si_shader * shader)589 static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen, struct si_shader_selector *sel,
590 struct si_shader *shader)
591 {
592 if (sscreen->info.family < CHIP_POLARIS10 || sscreen->info.chip_class >= GFX10)
593 return;
594
595 /* VS as VS, or VS as ES: */
596 if ((sel->info.stage == MESA_SHADER_VERTEX &&
597 (!shader->key.ge.as_ls && !shader->is_gs_copy_shader)) ||
598 /* TES as VS, or TES as ES: */
599 sel->info.stage == MESA_SHADER_TESS_EVAL) {
600 unsigned vtx_reuse_depth = 30;
601
602 if (sel->info.stage == MESA_SHADER_TESS_EVAL &&
603 sel->info.base.tess.spacing == TESS_SPACING_FRACTIONAL_ODD)
604 vtx_reuse_depth = 14;
605
606 shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
607 }
608 }
609
si_get_shader_pm4_state(struct si_shader * shader)610 static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
611 {
612 si_pm4_clear_state(&shader->pm4);
613 shader->pm4.is_shader = true;
614 return &shader->pm4;
615 }
616
si_get_num_vs_user_sgprs(struct si_shader * shader,unsigned num_always_on_user_sgprs)617 static unsigned si_get_num_vs_user_sgprs(struct si_shader *shader,
618 unsigned num_always_on_user_sgprs)
619 {
620 struct si_shader_selector *vs =
621 shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector;
622 unsigned num_vbos_in_user_sgprs = vs->num_vbos_in_user_sgprs;
623
624 /* 1 SGPR is reserved for the vertex buffer pointer. */
625 assert(num_always_on_user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST - 1);
626
627 if (num_vbos_in_user_sgprs)
628 return SI_SGPR_VS_VB_DESCRIPTOR_FIRST + num_vbos_in_user_sgprs * 4;
629
630 /* Add the pointer to VBO descriptors. */
631 return num_always_on_user_sgprs + 1;
632 }
633
634 /* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */
si_get_vs_vgpr_comp_cnt(struct si_screen * sscreen,struct si_shader * shader,bool legacy_vs_prim_id)635 static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen, struct si_shader *shader,
636 bool legacy_vs_prim_id)
637 {
638 assert(shader->selector->info.stage == MESA_SHADER_VERTEX ||
639 (shader->previous_stage_sel && shader->previous_stage_sel->info.stage == MESA_SHADER_VERTEX));
640
641 /* GFX6-9 LS (VertexID, RelAutoIndex, InstanceID / StepRate0, InstanceID)
642 * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0, VSPrimID, InstanceID)
643 * GFX10 LS (VertexID, RelAutoIndex, UserVGPR1, UserVGPR2 or InstanceID)
644 * GFX10 ES,VS (VertexID, UserVGPR1, UserVGPR2 or VSPrimID, UserVGPR3 or InstanceID)
645 */
646 bool is_ls = shader->selector->info.stage == MESA_SHADER_TESS_CTRL || shader->key.ge.as_ls;
647 unsigned max = 0;
648
649 if (shader->info.uses_instanceid) {
650 if (sscreen->info.chip_class >= GFX10)
651 max = MAX2(max, 3);
652 else if (is_ls)
653 max = MAX2(max, 2); /* use (InstanceID / StepRate0) because StepRate0 == 1 */
654 else
655 max = MAX2(max, 1); /* use (InstanceID / StepRate0) because StepRate0 == 1 */
656 }
657
658 if (legacy_vs_prim_id)
659 max = MAX2(max, 2); /* VSPrimID */
660
661 if (is_ls)
662 max = MAX2(max, 1); /* RelAutoIndex */
663
664 return max;
665 }
666
si_shader_ls(struct si_screen * sscreen,struct si_shader * shader)667 static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
668 {
669 struct si_pm4_state *pm4;
670 uint64_t va;
671
672 assert(sscreen->info.chip_class <= GFX8);
673
674 pm4 = si_get_shader_pm4_state(shader);
675 if (!pm4)
676 return;
677
678 va = shader->bo->gpu_address;
679 si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
680
681 shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
682 S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
683 S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) |
684 S_00B528_DX10_CLAMP(1) | S_00B528_FLOAT_MODE(shader->config.float_mode);
685 shader->config.rsrc2 =
686 S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR)) |
687 S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
688 }
689
si_shader_hs(struct si_screen * sscreen,struct si_shader * shader)690 static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
691 {
692 struct si_pm4_state *pm4;
693 uint64_t va;
694
695 pm4 = si_get_shader_pm4_state(shader);
696 if (!pm4)
697 return;
698
699 va = shader->bo->gpu_address;
700
701 if (sscreen->info.chip_class >= GFX9) {
702 if (sscreen->info.chip_class >= GFX10) {
703 si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
704 } else {
705 si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
706 }
707
708 unsigned num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_TCS_NUM_USER_SGPR);
709
710 shader->config.rsrc2 = S_00B42C_USER_SGPR(num_user_sgprs) |
711 S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
712
713 if (sscreen->info.chip_class >= GFX10)
714 shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
715 else
716 shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
717 } else {
718 si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
719 si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS,
720 S_00B424_MEM_BASE(sscreen->info.address32_hi >> 8));
721
722 shader->config.rsrc2 = S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) | S_00B42C_OC_LDS_EN(1) |
723 S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
724 }
725
726 si_pm4_set_reg(
727 pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
728 S_00B428_VGPRS((shader->config.num_vgprs - 1) / (shader->wave_size == 32 ? 8 : 4)) |
729 (sscreen->info.chip_class <= GFX9 ? S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8)
730 : 0) |
731 S_00B428_DX10_CLAMP(1) | S_00B428_MEM_ORDERED(si_shader_mem_ordered(shader)) |
732 S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
733 S_00B428_FLOAT_MODE(shader->config.float_mode) |
734 S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9
735 ? si_get_vs_vgpr_comp_cnt(sscreen, shader, false)
736 : 0));
737
738 if (sscreen->info.chip_class <= GFX8) {
739 si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, shader->config.rsrc2);
740 }
741 }
742
si_emit_shader_es(struct si_context * sctx)743 static void si_emit_shader_es(struct si_context *sctx)
744 {
745 struct si_shader *shader = sctx->queued.named.es;
746 if (!shader)
747 return;
748
749 radeon_begin(&sctx->gfx_cs);
750 radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
751 SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
752 shader->selector->esgs_itemsize / 4);
753
754 if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL)
755 radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
756 shader->vgt_tf_param);
757
758 if (shader->vgt_vertex_reuse_block_cntl)
759 radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
760 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
761 shader->vgt_vertex_reuse_block_cntl);
762 radeon_end_update_context_roll(sctx);
763 }
764
si_shader_es(struct si_screen * sscreen,struct si_shader * shader)765 static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
766 {
767 struct si_pm4_state *pm4;
768 unsigned num_user_sgprs;
769 unsigned vgpr_comp_cnt;
770 uint64_t va;
771 unsigned oc_lds_en;
772
773 assert(sscreen->info.chip_class <= GFX8);
774
775 pm4 = si_get_shader_pm4_state(shader);
776 if (!pm4)
777 return;
778
779 pm4->atom.emit = si_emit_shader_es;
780 va = shader->bo->gpu_address;
781
782 if (shader->selector->info.stage == MESA_SHADER_VERTEX) {
783 vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
784 num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
785 } else if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL) {
786 vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
787 num_user_sgprs = SI_TES_NUM_USER_SGPR;
788 } else
789 unreachable("invalid shader selector type");
790
791 oc_lds_en = shader->selector->info.stage == MESA_SHADER_TESS_EVAL ? 1 : 0;
792
793 si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
794 si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES,
795 S_00B324_MEM_BASE(sscreen->info.address32_hi >> 8));
796 si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
797 S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
798 S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
799 S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) | S_00B328_DX10_CLAMP(1) |
800 S_00B328_FLOAT_MODE(shader->config.float_mode));
801 si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
802 S_00B32C_USER_SGPR(num_user_sgprs) | S_00B32C_OC_LDS_EN(oc_lds_en) |
803 S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
804
805 if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL)
806 si_set_tesseval_regs(sscreen, shader->selector, shader);
807
808 polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader);
809 }
810
gfx9_get_gs_info(struct si_shader_selector * es,struct si_shader_selector * gs,struct gfx9_gs_info * out)811 void gfx9_get_gs_info(struct si_shader_selector *es, struct si_shader_selector *gs,
812 struct gfx9_gs_info *out)
813 {
814 unsigned gs_num_invocations = MAX2(gs->info.base.gs.invocations, 1);
815 unsigned input_prim = gs->info.base.gs.input_primitive;
816 bool uses_adjacency =
817 input_prim >= PIPE_PRIM_LINES_ADJACENCY && input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
818
819 /* All these are in dwords: */
820 /* We can't allow using the whole LDS, because GS waves compete with
821 * other shader stages for LDS space. */
822 const unsigned max_lds_size = 8 * 1024;
823 const unsigned esgs_itemsize = es->esgs_itemsize / 4;
824 unsigned esgs_lds_size;
825
826 /* All these are per subgroup: */
827 const unsigned max_out_prims = 32 * 1024;
828 const unsigned max_es_verts = 255;
829 const unsigned ideal_gs_prims = 64;
830 unsigned max_gs_prims, gs_prims;
831 unsigned min_es_verts, es_verts, worst_case_es_verts;
832
833 if (uses_adjacency || gs_num_invocations > 1)
834 max_gs_prims = 127 / gs_num_invocations;
835 else
836 max_gs_prims = 255;
837
838 /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
839 * Make sure we don't go over the maximum value.
840 */
841 if (gs->info.base.gs.vertices_out > 0) {
842 max_gs_prims =
843 MIN2(max_gs_prims, max_out_prims / (gs->info.base.gs.vertices_out * gs_num_invocations));
844 }
845 assert(max_gs_prims > 0);
846
847 /* If the primitive has adjacency, halve the number of vertices
848 * that will be reused in multiple primitives.
849 */
850 min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);
851
852 gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
853 worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);
854
855 /* Compute ESGS LDS size based on the worst case number of ES vertices
856 * needed to create the target number of GS prims per subgroup.
857 */
858 esgs_lds_size = esgs_itemsize * worst_case_es_verts;
859
860 /* If total LDS usage is too big, refactor partitions based on ratio
861 * of ESGS item sizes.
862 */
863 if (esgs_lds_size > max_lds_size) {
864 /* Our target GS Prims Per Subgroup was too large. Calculate
865 * the maximum number of GS Prims Per Subgroup that will fit
866 * into LDS, capped by the maximum that the hardware can support.
867 */
868 gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)), max_gs_prims);
869 assert(gs_prims > 0);
870 worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);
871
872 esgs_lds_size = esgs_itemsize * worst_case_es_verts;
873 assert(esgs_lds_size <= max_lds_size);
874 }
875
876 /* Now calculate remaining ESGS information. */
877 if (esgs_lds_size)
878 es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
879 else
880 es_verts = max_es_verts;
881
882 /* Vertices for adjacency primitives are not always reused, so restore
883 * it for ES_VERTS_PER_SUBGRP.
884 */
885 min_es_verts = gs->gs_input_verts_per_prim;
886
887 /* For normal primitives, the VGT only checks if they are past the ES
888 * verts per subgroup after allocating a full GS primitive and if they
889 * are, kick off a new subgroup. But if those additional ES verts are
890 * unique (e.g. not reused) we need to make sure there is enough LDS
891 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
892 */
893 es_verts -= min_es_verts - 1;
894
895 out->es_verts_per_subgroup = es_verts;
896 out->gs_prims_per_subgroup = gs_prims;
897 out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
898 out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup * gs->info.base.gs.vertices_out;
899 out->esgs_ring_size = esgs_lds_size;
900
901 assert(out->max_prims_per_subgroup <= max_out_prims);
902 }
903
si_emit_shader_gs(struct si_context * sctx)904 static void si_emit_shader_gs(struct si_context *sctx)
905 {
906 struct si_shader *shader = sctx->queued.named.gs;
907 if (!shader)
908 return;
909
910 radeon_begin(&sctx->gfx_cs);
911
912 /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
913 * R_028A68_VGT_GSVS_RING_OFFSET_3 */
914 radeon_opt_set_context_reg3(
915 sctx, R_028A60_VGT_GSVS_RING_OFFSET_1, SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
916 shader->ctx_reg.gs.vgt_gsvs_ring_offset_1, shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
917 shader->ctx_reg.gs.vgt_gsvs_ring_offset_3);
918
919 /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
920 radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
921 SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
922 shader->ctx_reg.gs.vgt_gsvs_ring_itemsize);
923
924 /* R_028B38_VGT_GS_MAX_VERT_OUT */
925 radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, SI_TRACKED_VGT_GS_MAX_VERT_OUT,
926 shader->ctx_reg.gs.vgt_gs_max_vert_out);
927
928 /* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1
929 * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */
930 radeon_opt_set_context_reg4(
931 sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE, SI_TRACKED_VGT_GS_VERT_ITEMSIZE,
932 shader->ctx_reg.gs.vgt_gs_vert_itemsize, shader->ctx_reg.gs.vgt_gs_vert_itemsize_1,
933 shader->ctx_reg.gs.vgt_gs_vert_itemsize_2, shader->ctx_reg.gs.vgt_gs_vert_itemsize_3);
934
935 /* R_028B90_VGT_GS_INSTANCE_CNT */
936 radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, SI_TRACKED_VGT_GS_INSTANCE_CNT,
937 shader->ctx_reg.gs.vgt_gs_instance_cnt);
938
939 if (sctx->chip_class >= GFX9) {
940 /* R_028A44_VGT_GS_ONCHIP_CNTL */
941 radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, SI_TRACKED_VGT_GS_ONCHIP_CNTL,
942 shader->ctx_reg.gs.vgt_gs_onchip_cntl);
943 /* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */
944 radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
945 SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
946 shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup);
947 /* R_028AAC_VGT_ESGS_RING_ITEMSIZE */
948 radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
949 SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
950 shader->ctx_reg.gs.vgt_esgs_ring_itemsize);
951
952 if (shader->key.ge.part.gs.es->info.stage == MESA_SHADER_TESS_EVAL)
953 radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
954 shader->vgt_tf_param);
955 if (shader->vgt_vertex_reuse_block_cntl)
956 radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
957 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
958 shader->vgt_vertex_reuse_block_cntl);
959 }
960 radeon_end_update_context_roll(sctx);
961
962 /* These don't cause any context rolls. */
963 if (sctx->screen->info.spi_cu_en_has_effect) {
964 if (sctx->chip_class >= GFX7) {
965 ac_set_reg_cu_en(&sctx->gfx_cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
966 shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs,
967 C_00B21C_CU_EN, 0, &sctx->screen->info,
968 (void (*)(void*, unsigned, uint32_t))
969 (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
970 sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS);
971 }
972 if (sctx->chip_class >= GFX10) {
973 ac_set_reg_cu_en(&sctx->gfx_cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
974 shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs,
975 C_00B204_CU_EN, 16, &sctx->screen->info,
976 (void (*)(void*, unsigned, uint32_t))
977 (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
978 sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS);
979 }
980 } else {
981 radeon_begin_again(&sctx->gfx_cs);
982 if (sctx->chip_class >= GFX7) {
983 radeon_opt_set_sh_reg_idx3(sctx, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
984 SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,
985 shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs);
986 }
987 if (sctx->chip_class >= GFX10) {
988 radeon_opt_set_sh_reg_idx3(sctx, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
989 SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,
990 shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs);
991 }
992 radeon_end();
993 }
994 }
995
si_shader_gs(struct si_screen * sscreen,struct si_shader * shader)996 static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
997 {
998 struct si_shader_selector *sel = shader->selector;
999 const ubyte *num_components = sel->info.num_stream_output_components;
1000 unsigned gs_num_invocations = sel->info.base.gs.invocations;
1001 struct si_pm4_state *pm4;
1002 uint64_t va;
1003 unsigned max_stream = util_last_bit(sel->info.base.gs.active_stream_mask);
1004 unsigned offset;
1005
1006 pm4 = si_get_shader_pm4_state(shader);
1007 if (!pm4)
1008 return;
1009
1010 pm4->atom.emit = si_emit_shader_gs;
1011
1012 offset = num_components[0] * sel->info.base.gs.vertices_out;
1013 shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset;
1014
1015 if (max_stream >= 2)
1016 offset += num_components[1] * sel->info.base.gs.vertices_out;
1017 shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset;
1018
1019 if (max_stream >= 3)
1020 offset += num_components[2] * sel->info.base.gs.vertices_out;
1021 shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;
1022
1023 if (max_stream >= 4)
1024 offset += num_components[3] * sel->info.base.gs.vertices_out;
1025 shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;
1026
1027 /* The GSVS_RING_ITEMSIZE register takes 15 bits */
1028 assert(offset < (1 << 15));
1029
1030 shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->info.base.gs.vertices_out;
1031
1032 shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0];
1033 shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 2) ? num_components[1] : 0;
1034 shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 3) ? num_components[2] : 0;
1035 shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 4) ? num_components[3] : 0;
1036
1037 shader->ctx_reg.gs.vgt_gs_instance_cnt =
1038 S_028B90_CNT(MIN2(gs_num_invocations, 127)) | S_028B90_ENABLE(gs_num_invocations > 0);
1039
1040 /* Copy over fields from the GS copy shader to make them easily accessible from GS. */
1041 shader->pa_cl_vs_out_cntl = shader->gs_copy_shader->pa_cl_vs_out_cntl;
1042
1043 va = shader->bo->gpu_address;
1044
1045 if (sscreen->info.chip_class >= GFX9) {
1046 unsigned input_prim = sel->info.base.gs.input_primitive;
1047 gl_shader_stage es_stage = shader->key.ge.part.gs.es->info.stage;
1048 unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
1049
1050 if (es_stage == MESA_SHADER_VERTEX) {
1051 es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
1052 } else if (es_stage == MESA_SHADER_TESS_EVAL)
1053 es_vgpr_comp_cnt = shader->key.ge.part.gs.es->info.uses_primid ? 3 : 2;
1054 else
1055 unreachable("invalid shader selector type");
1056
1057 /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
1058 * VGPR[0:4] are always loaded.
1059 */
1060 if (sel->info.uses_invocationid)
1061 gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
1062 else if (sel->info.uses_primid)
1063 gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
1064 else if (input_prim >= PIPE_PRIM_TRIANGLES)
1065 gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
1066 else
1067 gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
1068
1069 unsigned num_user_sgprs;
1070 if (es_stage == MESA_SHADER_VERTEX)
1071 num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_GS_NUM_USER_SGPR);
1072 else
1073 num_user_sgprs = GFX9_GS_NUM_USER_SGPR;
1074
1075 if (sscreen->info.chip_class >= GFX10) {
1076 si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
1077 } else {
1078 si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
1079 }
1080
1081 uint32_t rsrc1 = S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | S_00B228_DX10_CLAMP(1) |
1082 S_00B228_MEM_ORDERED(si_shader_mem_ordered(shader)) |
1083 S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) |
1084 S_00B228_FLOAT_MODE(shader->config.float_mode) |
1085 S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
1086 uint32_t rsrc2 = S_00B22C_USER_SGPR(num_user_sgprs) |
1087 S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
1088 S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL) |
1089 S_00B22C_LDS_SIZE(shader->config.lds_size) |
1090 S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
1091
1092 if (sscreen->info.chip_class >= GFX10) {
1093 rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
1094 } else {
1095 rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8);
1096 rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
1097 }
1098
1099 si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1);
1100 si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2);
1101
1102 shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs = S_00B21C_CU_EN(0xffff) |
1103 S_00B21C_WAVE_LIMIT(0x3F);
1104 shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs =
1105 S_00B204_CU_EN(0xffff) | S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(0);
1106
1107 shader->ctx_reg.gs.vgt_gs_onchip_cntl =
1108 S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) |
1109 S_028A44_GS_PRIMS_PER_SUBGRP(shader->gs_info.gs_prims_per_subgroup) |
1110 S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->gs_info.gs_inst_prims_in_subgroup);
1111 shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup =
1112 S_028A94_MAX_PRIMS_PER_SUBGROUP(shader->gs_info.max_prims_per_subgroup);
1113 shader->ctx_reg.gs.vgt_esgs_ring_itemsize = shader->key.ge.part.gs.es->esgs_itemsize / 4;
1114
1115 if (es_stage == MESA_SHADER_TESS_EVAL)
1116 si_set_tesseval_regs(sscreen, shader->key.ge.part.gs.es, shader);
1117
1118 polaris_set_vgt_vertex_reuse(sscreen, shader->key.ge.part.gs.es, shader);
1119 } else {
1120 shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs = S_00B21C_CU_EN(0xffff) |
1121 S_00B21C_WAVE_LIMIT(0x3F);
1122
1123 si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
1124 si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS,
1125 S_00B224_MEM_BASE(sscreen->info.address32_hi >> 8));
1126
1127 si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
1128 S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
1129 S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
1130 S_00B228_DX10_CLAMP(1) | S_00B228_FLOAT_MODE(shader->config.float_mode));
1131 si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
1132 S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
1133 S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
1134 }
1135 }
1136
gfx10_is_ngg_passthrough(struct si_shader * shader)1137 bool gfx10_is_ngg_passthrough(struct si_shader *shader)
1138 {
1139 struct si_shader_selector *sel = shader->selector;
1140
1141 /* Never use NGG passthrough if culling is possible even when it's not used by this shader,
1142 * so that we don't get context rolls when enabling and disabling NGG passthrough.
1143 */
1144 if (sel->screen->use_ngg_culling)
1145 return false;
1146
1147 /* The definition of NGG passthrough is:
1148 * - user GS is turned off (no amplification, no GS instancing, and no culling)
1149 * - VGT_ESGS_RING_ITEMSIZE is ignored (behaving as if it was equal to 1)
1150 * - vertex indices are packed into 1 VGPR
1151 * - Dimgrey and later chips can optionally skip the gs_alloc_req message
1152 *
1153 * NGG passthrough still allows the use of LDS.
1154 */
1155 return sel->info.stage != MESA_SHADER_GEOMETRY && !shader->key.ge.opt.ngg_culling;
1156 }
1157
1158 /* Common tail code for NGG primitive shaders. */
gfx10_emit_shader_ngg_tail(struct si_context * sctx,struct si_shader * shader)1159 static void gfx10_emit_shader_ngg_tail(struct si_context *sctx, struct si_shader *shader)
1160 {
1161 radeon_begin(&sctx->gfx_cs);
1162 radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
1163 SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
1164 shader->ctx_reg.ngg.ge_max_output_per_subgroup);
1165 radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL, SI_TRACKED_GE_NGG_SUBGRP_CNTL,
1166 shader->ctx_reg.ngg.ge_ngg_subgrp_cntl);
1167 radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, SI_TRACKED_VGT_PRIMITIVEID_EN,
1168 shader->ctx_reg.ngg.vgt_primitiveid_en);
1169 radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, SI_TRACKED_VGT_GS_ONCHIP_CNTL,
1170 shader->ctx_reg.ngg.vgt_gs_onchip_cntl);
1171 radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, SI_TRACKED_VGT_GS_INSTANCE_CNT,
1172 shader->ctx_reg.ngg.vgt_gs_instance_cnt);
1173 radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
1174 SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
1175 shader->ctx_reg.ngg.vgt_esgs_ring_itemsize);
1176 radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG, SI_TRACKED_SPI_VS_OUT_CONFIG,
1177 shader->ctx_reg.ngg.spi_vs_out_config);
1178 radeon_opt_set_context_reg2(
1179 sctx, R_028708_SPI_SHADER_IDX_FORMAT, SI_TRACKED_SPI_SHADER_IDX_FORMAT,
1180 shader->ctx_reg.ngg.spi_shader_idx_format, shader->ctx_reg.ngg.spi_shader_pos_format);
1181 radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL, SI_TRACKED_PA_CL_VTE_CNTL,
1182 shader->ctx_reg.ngg.pa_cl_vte_cntl);
1183 radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL, SI_TRACKED_PA_CL_NGG_CNTL,
1184 shader->ctx_reg.ngg.pa_cl_ngg_cntl);
1185
1186 radeon_end_update_context_roll(sctx);
1187
1188 /* These don't cause a context roll. */
1189 radeon_begin_again(&sctx->gfx_cs);
1190 radeon_opt_set_uconfig_reg(sctx, R_030980_GE_PC_ALLOC, SI_TRACKED_GE_PC_ALLOC,
1191 shader->ctx_reg.ngg.ge_pc_alloc);
1192 if (sctx->screen->info.spi_cu_en_has_effect) {
1193 radeon_end();
1194 ac_set_reg_cu_en(&sctx->gfx_cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
1195 shader->ctx_reg.ngg.spi_shader_pgm_rsrc3_gs,
1196 C_00B21C_CU_EN, 0, &sctx->screen->info,
1197 (void (*)(void*, unsigned, uint32_t))
1198 (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
1199 ac_set_reg_cu_en(&sctx->gfx_cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
1200 shader->ctx_reg.ngg.spi_shader_pgm_rsrc4_gs,
1201 C_00B204_CU_EN, 16, &sctx->screen->info,
1202 (void (*)(void*, unsigned, uint32_t))
1203 (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
1204 sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS) &
1205 ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS);
1206 } else {
1207 radeon_opt_set_sh_reg_idx3(sctx, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
1208 SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,
1209 shader->ctx_reg.ngg.spi_shader_pgm_rsrc3_gs);
1210 radeon_opt_set_sh_reg_idx3(sctx, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
1211 SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,
1212 shader->ctx_reg.ngg.spi_shader_pgm_rsrc4_gs);
1213 radeon_end();
1214 }
1215 }
1216
gfx10_emit_shader_ngg_notess_nogs(struct si_context * sctx)1217 static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx)
1218 {
1219 struct si_shader *shader = sctx->queued.named.gs;
1220 if (!shader)
1221 return;
1222
1223 gfx10_emit_shader_ngg_tail(sctx, shader);
1224 }
1225
gfx10_emit_shader_ngg_tess_nogs(struct si_context * sctx)1226 static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx)
1227 {
1228 struct si_shader *shader = sctx->queued.named.gs;
1229 if (!shader)
1230 return;
1231
1232 radeon_begin(&sctx->gfx_cs);
1233 radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
1234 shader->vgt_tf_param);
1235 radeon_end_update_context_roll(sctx);
1236
1237 gfx10_emit_shader_ngg_tail(sctx, shader);
1238 }
1239
gfx10_emit_shader_ngg_notess_gs(struct si_context * sctx)1240 static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx)
1241 {
1242 struct si_shader *shader = sctx->queued.named.gs;
1243 if (!shader)
1244 return;
1245
1246 radeon_begin(&sctx->gfx_cs);
1247 radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, SI_TRACKED_VGT_GS_MAX_VERT_OUT,
1248 shader->ctx_reg.ngg.vgt_gs_max_vert_out);
1249 radeon_end_update_context_roll(sctx);
1250
1251 gfx10_emit_shader_ngg_tail(sctx, shader);
1252 }
1253
gfx10_emit_shader_ngg_tess_gs(struct si_context * sctx)1254 static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx)
1255 {
1256 struct si_shader *shader = sctx->queued.named.gs;
1257
1258 if (!shader)
1259 return;
1260
1261 radeon_begin(&sctx->gfx_cs);
1262 radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, SI_TRACKED_VGT_GS_MAX_VERT_OUT,
1263 shader->ctx_reg.ngg.vgt_gs_max_vert_out);
1264 radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
1265 shader->vgt_tf_param);
1266 radeon_end_update_context_roll(sctx);
1267
1268 gfx10_emit_shader_ngg_tail(sctx, shader);
1269 }
1270
si_get_input_prim(const struct si_shader_selector * gs,const union si_shader_key * key)1271 unsigned si_get_input_prim(const struct si_shader_selector *gs, const union si_shader_key *key)
1272 {
1273 if (gs->info.stage == MESA_SHADER_GEOMETRY)
1274 return gs->info.base.gs.input_primitive;
1275
1276 if (gs->info.stage == MESA_SHADER_TESS_EVAL) {
1277 if (gs->info.base.tess.point_mode)
1278 return PIPE_PRIM_POINTS;
1279 if (gs->info.base.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES)
1280 return PIPE_PRIM_LINES;
1281 return PIPE_PRIM_TRIANGLES;
1282 }
1283
1284 if (key->ge.opt.ngg_culling & SI_NGG_CULL_LINES)
1285 return PIPE_PRIM_LINES;
1286
1287 return PIPE_PRIM_TRIANGLES; /* worst case for all callers */
1288 }
1289
si_get_vs_out_cntl(const struct si_shader_selector * sel,const struct si_shader * shader,bool ngg)1290 static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel,
1291 const struct si_shader *shader, bool ngg)
1292 {
1293 /* Clip distances can be killed, but cull distances can't. */
1294 unsigned clipcull_mask = (sel->clipdist_mask & ~shader->key.ge.opt.kill_clip_distances) |
1295 sel->culldist_mask;
1296 bool writes_psize = sel->info.writes_psize && !shader->key.ge.opt.kill_pointsize;
1297 bool misc_vec_ena = writes_psize || (sel->info.writes_edgeflag && !ngg) ||
1298 sel->screen->options.vrs2x2 ||
1299 sel->info.writes_layer || sel->info.writes_viewport_index;
1300
1301 return S_02881C_VS_OUT_CCDIST0_VEC_ENA((clipcull_mask & 0x0F) != 0) |
1302 S_02881C_VS_OUT_CCDIST1_VEC_ENA((clipcull_mask & 0xF0) != 0) |
1303 S_02881C_USE_VTX_POINT_SIZE(writes_psize) |
1304 S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) |
1305 S_02881C_USE_VTX_VRS_RATE(sel->screen->options.vrs2x2) |
1306 S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
1307 S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
1308 S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
1309 S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
1310 }
1311
1312 /**
1313 * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader
1314 * in NGG mode.
1315 */
gfx10_shader_ngg(struct si_screen * sscreen,struct si_shader * shader)1316 static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader)
1317 {
1318 const struct si_shader_selector *gs_sel = shader->selector;
1319 const struct si_shader_info *gs_info = &gs_sel->info;
1320 const gl_shader_stage gs_stage = shader->selector->info.stage;
1321 const struct si_shader_selector *es_sel =
1322 shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector;
1323 const struct si_shader_info *es_info = &es_sel->info;
1324 const gl_shader_stage es_stage = es_sel->info.stage;
1325 unsigned num_user_sgprs;
1326 unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
1327 uint64_t va;
1328 bool window_space = gs_info->stage == MESA_SHADER_VERTEX ?
1329 gs_info->base.vs.window_space_position : 0;
1330 bool es_enable_prim_id = shader->key.ge.mono.u.vs_export_prim_id || es_info->uses_primid;
1331 unsigned gs_num_invocations = MAX2(gs_sel->info.base.gs.invocations, 1);
1332 unsigned input_prim = si_get_input_prim(gs_sel, &shader->key);
1333 bool break_wave_at_eoi = false;
1334 struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader);
1335 if (!pm4)
1336 return;
1337
1338 if (es_stage == MESA_SHADER_TESS_EVAL) {
1339 pm4->atom.emit = gs_stage == MESA_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs
1340 : gfx10_emit_shader_ngg_tess_nogs;
1341 } else {
1342 pm4->atom.emit = gs_stage == MESA_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_notess_gs
1343 : gfx10_emit_shader_ngg_notess_nogs;
1344 }
1345
1346 va = shader->bo->gpu_address;
1347
1348 if (es_stage == MESA_SHADER_VERTEX) {
1349 es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
1350
1351 if (es_info->base.vs.blit_sgprs_amd) {
1352 num_user_sgprs =
1353 SI_SGPR_VS_BLIT_DATA + es_info->base.vs.blit_sgprs_amd;
1354 } else {
1355 num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_GS_NUM_USER_SGPR);
1356 }
1357 } else {
1358 assert(es_stage == MESA_SHADER_TESS_EVAL);
1359 es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2;
1360 num_user_sgprs = GFX9_GS_NUM_USER_SGPR;
1361
1362 if (es_enable_prim_id || gs_info->uses_primid)
1363 break_wave_at_eoi = true;
1364 }
1365
1366 /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
1367 * VGPR[0:4] are always loaded.
1368 *
1369 * Vertex shaders always need to load VGPR3, because they need to
1370 * pass edge flags for decomposed primitives (such as quads) to the PA
1371 * for the GL_LINE polygon mode to skip rendering lines on inner edges.
1372 */
1373 if (gs_info->uses_invocationid ||
1374 (gfx10_edgeflags_have_effect(shader) && !gfx10_is_ngg_passthrough(shader)))
1375 gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */
1376 else if ((gs_stage == MESA_SHADER_GEOMETRY && gs_info->uses_primid) ||
1377 (gs_stage == MESA_SHADER_VERTEX && shader->key.ge.mono.u.vs_export_prim_id))
1378 gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
1379 else if (input_prim >= PIPE_PRIM_TRIANGLES && !gfx10_is_ngg_passthrough(shader))
1380 gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
1381 else
1382 gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
1383
1384 unsigned late_alloc_wave64, cu_mask;
1385
1386 ac_compute_late_alloc(&sscreen->info, true, shader->key.ge.opt.ngg_culling,
1387 shader->config.scratch_bytes_per_wave > 0,
1388 &late_alloc_wave64, &cu_mask);

   si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
   si_pm4_set_reg(
      pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
      S_00B228_VGPRS((shader->config.num_vgprs - 1) / (shader->wave_size == 32 ? 8 : 4)) |
         S_00B228_FLOAT_MODE(shader->config.float_mode) | S_00B228_DX10_CLAMP(1) |
         S_00B228_MEM_ORDERED(si_shader_mem_ordered(shader)) |
         /* Disable the WGP mode on gfx10.3 because it can hang. (it happened on VanGogh)
          * Let's disable it on all chips that disable exactly 1 CU per SA for GS. */
         S_00B228_WGP_MODE(sscreen->info.chip_class == GFX10) |
         S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
   si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                  S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) |
                     S_00B22C_USER_SGPR(num_user_sgprs) |
                     S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                     S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) |
                     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL) |
                     S_00B22C_LDS_SIZE(shader->config.lds_size));

   shader->ctx_reg.ngg.spi_shader_pgm_rsrc3_gs = S_00B21C_CU_EN(cu_mask) |
                                                 S_00B21C_WAVE_LIMIT(0x3F);
   shader->ctx_reg.ngg.spi_shader_pgm_rsrc4_gs =
      S_00B204_CU_EN(0xffff) | S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64);

   nparams = MAX2(shader->info.nr_param_exports, 1);
   shader->ctx_reg.ngg.spi_vs_out_config =
      S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
      S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);

   shader->ctx_reg.ngg.spi_shader_idx_format =
      S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP);
   shader->ctx_reg.ngg.spi_shader_pos_format =
      S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
      S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE);

   shader->ctx_reg.ngg.vgt_primitiveid_en =
      S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
      S_028A84_NGG_DISABLE_PROVOK_REUSE(shader->key.ge.mono.u.vs_export_prim_id ||
                                        gs_sel->info.writes_primid);

   if (gs_stage == MESA_SHADER_GEOMETRY) {
      shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4;
      shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->info.base.gs.vertices_out;
   } else {
      shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1;
   }

   if (es_stage == MESA_SHADER_TESS_EVAL)
      si_set_tesseval_regs(sscreen, es_sel, shader);

   shader->ctx_reg.ngg.vgt_gs_onchip_cntl =
      S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) |
      S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) |
      S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations);
   shader->ctx_reg.ngg.ge_max_output_per_subgroup =
      S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts);
   shader->ctx_reg.ngg.ge_ngg_subgrp_cntl = S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) |
                                            S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */
   shader->ctx_reg.ngg.vgt_gs_instance_cnt =
      S_028B90_CNT(gs_num_invocations) | S_028B90_ENABLE(gs_num_invocations > 1) |
      S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(shader->ngg.max_vert_out_per_gs_instance);

   /* Output hw-generated edge flags if needed and pass them via the prim
    * export to prevent drawing lines on internal edges of decomposed
    * primitives (such as quads) with polygon mode = lines.
    */
   shader->ctx_reg.ngg.pa_cl_ngg_cntl =
      S_028838_INDEX_BUF_EDGE_FLAG_ENA(gfx10_edgeflags_have_effect(shader)) |
      /* Reuse for NGG. */
      S_028838_VERTEX_REUSE_DEPTH(sscreen->info.chip_class >= GFX10_3 ? 30 : 0);
   shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, shader, true);

   /* Oversubscribe PC. This improves performance when there are too many varyings. */
   unsigned oversub_pc_factor = 1;

   if (shader->key.ge.opt.ngg_culling) {
      /* Be more aggressive with NGG culling. */
      if (shader->info.nr_param_exports > 4)
         oversub_pc_factor = 4;
      else if (shader->info.nr_param_exports > 2)
         oversub_pc_factor = 3;
      else
         oversub_pc_factor = 2;
   }
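   /* Worked example (pc_lines value is illustrative): with NGG culling and
    * 6 param exports, oversub_pc_factor = 4; with pc_lines = 1024, the code
    * below requests (1024 / 4) * 4 = 1024 PC lines.
    */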

   unsigned oversub_pc_lines =
      late_alloc_wave64 ? (sscreen->info.pc_lines / 4) * oversub_pc_factor : 0;
   shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(oversub_pc_lines > 0) |
                                     S_030980_NUM_PC_LINES(oversub_pc_lines - 1);

   shader->ge_cntl = S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
                     S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts) |
                     S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

   /* On gfx10, the GE only checks against the maximum number of ES verts after
    * allocating a full GS primitive. So we need to ensure that whenever
    * this check passes, there is enough space for a full primitive without
    * vertex reuse. VERT_GRP_SIZE=256 doesn't need this. We should always get 256
    * if we have enough LDS.
    *
    * Tessellation is unaffected because it always sets GE_CNTL.VERT_GRP_SIZE = 0.
    */
   if ((sscreen->info.chip_class == GFX10) &&
       (es_stage == MESA_SHADER_VERTEX || gs_stage == MESA_SHADER_VERTEX) && /* = no tess */
       shader->ngg.hw_max_esverts != 256 &&
       shader->ngg.hw_max_esverts > 5) {
      /* This could be based on the input primitive type. 5 is the worst case
       * for primitive types with adjacency.
       */
      shader->ge_cntl &= C_03096C_VERT_GRP_SIZE;
      shader->ge_cntl |= S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts - 5);
   }
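   /* Illustrative case: hw_max_esverts = 128 yields VERT_GRP_SIZE = 123,
    * leaving room for one more primitive (5 vertices, the worst case the
    * code assumes for adjacency types) after the GE's late check passes.
    */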

   if (window_space) {
      shader->ctx_reg.ngg.pa_cl_vte_cntl = S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
   } else {
      shader->ctx_reg.ngg.pa_cl_vte_cntl =
         S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
         S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
         S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
   }

   shader->ctx_reg.ngg.vgt_stages.u.ngg = 1;
   shader->ctx_reg.ngg.vgt_stages.u.streamout = gs_sel->so.num_outputs;
   shader->ctx_reg.ngg.vgt_stages.u.ngg_passthrough = gfx10_is_ngg_passthrough(shader);
   shader->ctx_reg.ngg.vgt_stages.u.gs_wave32 = shader->wave_size == 32;
}

static void si_emit_shader_vs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.vs;
   if (!shader)
      return;

   radeon_begin(&sctx->gfx_cs);
   radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE, SI_TRACKED_VGT_GS_MODE,
                              shader->ctx_reg.vs.vgt_gs_mode);
   radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, SI_TRACKED_VGT_PRIMITIVEID_EN,
                              shader->ctx_reg.vs.vgt_primitiveid_en);

   if (sctx->chip_class <= GFX8) {
      radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF, SI_TRACKED_VGT_REUSE_OFF,
                                 shader->ctx_reg.vs.vgt_reuse_off);
   }

   radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG, SI_TRACKED_SPI_VS_OUT_CONFIG,
                              shader->ctx_reg.vs.spi_vs_out_config);

   radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
                              SI_TRACKED_SPI_SHADER_POS_FORMAT,
                              shader->ctx_reg.vs.spi_shader_pos_format);

   radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL, SI_TRACKED_PA_CL_VTE_CNTL,
                              shader->ctx_reg.vs.pa_cl_vte_cntl);

   if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL)
      radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
                                 shader->vgt_tf_param);

   if (shader->vgt_vertex_reuse_block_cntl)
      radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 shader->vgt_vertex_reuse_block_cntl);

   /* Required programming for tessellation. (legacy pipeline only) */
   if (sctx->chip_class >= GFX10 && shader->selector->info.stage == MESA_SHADER_TESS_EVAL) {
      radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                                 SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                                 S_028A44_ES_VERTS_PER_SUBGRP(250) |
                                 S_028A44_GS_PRIMS_PER_SUBGRP(126) |
                                 S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
   }

   radeon_end_update_context_roll(sctx);

   /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
   if (sctx->chip_class >= GFX10) {
      radeon_begin_again(&sctx->gfx_cs);
      radeon_opt_set_uconfig_reg(sctx, R_030980_GE_PC_ALLOC, SI_TRACKED_GE_PC_ALLOC,
                                 shader->ctx_reg.vs.ge_pc_alloc);
      radeon_end();
   }
}

/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
                         struct si_shader_selector *gs)
{
   const struct si_shader_info *info = &shader->selector->info;
   struct si_pm4_state *pm4;
   unsigned num_user_sgprs, vgpr_comp_cnt;
   uint64_t va;
   unsigned nparams, oc_lds_en;
   bool window_space = info->stage == MESA_SHADER_VERTEX ?
                          info->base.vs.window_space_position : 0;
   bool enable_prim_id = shader->key.ge.mono.u.vs_export_prim_id || info->uses_primid;

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   pm4->atom.emit = si_emit_shader_vs;

   /* We always write VGT_GS_MODE in the VS state, because every switch
    * between different shader pipelines involving a different GS or no
    * GS at all involves a switch of the VS (different GS use different
    * copy shaders). On the other hand, when the API switches from a GS to
    * no GS and then back to the same GS used originally, the GS state is
    * not sent again.
    */
   if (!gs) {
      unsigned mode = V_028A40_GS_OFF;

      /* PrimID needs GS scenario A. */
      if (enable_prim_id)
         mode = V_028A40_GS_SCENARIO_A;

      shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
      shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
   } else {
      shader->ctx_reg.vs.vgt_gs_mode =
         ac_vgt_gs_mode(gs->info.base.gs.vertices_out, sscreen->info.chip_class);
      shader->ctx_reg.vs.vgt_primitiveid_en = 0;
   }

   if (sscreen->info.chip_class <= GFX8) {
      /* Reuse needs to be set off if we write oViewport. */
      shader->ctx_reg.vs.vgt_reuse_off = S_028AB4_REUSE_OFF(info->writes_viewport_index);
   }

   va = shader->bo->gpu_address;

   if (gs) {
      vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
      num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
   } else if (shader->selector->info.stage == MESA_SHADER_VERTEX) {
      vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id);

      if (info->base.vs.blit_sgprs_amd) {
         num_user_sgprs = SI_SGPR_VS_BLIT_DATA + info->base.vs.blit_sgprs_amd;
      } else {
         num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
      }
   } else if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL) {
      vgpr_comp_cnt = enable_prim_id ? 3 : 2;
      num_user_sgprs = SI_TES_NUM_USER_SGPR;
   } else
      unreachable("invalid shader selector type");

   /* VS is required to export at least one param. */
   nparams = MAX2(shader->info.nr_param_exports, 1);
   shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

   if (sscreen->info.chip_class >= GFX10) {
      shader->ctx_reg.vs.spi_vs_out_config |=
         S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
   }

   shader->ctx_reg.vs.spi_shader_pos_format =
      S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
      S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? V_02870C_SPI_SHADER_4COMP
                                                                  : V_02870C_SPI_SHADER_NONE);
   unsigned late_alloc_wave64, cu_mask;
   ac_compute_late_alloc(&sscreen->info, false, false,
                         shader->config.scratch_bytes_per_wave > 0,
                         &late_alloc_wave64, &cu_mask);

   shader->ctx_reg.vs.ge_pc_alloc = S_030980_OVERSUB_EN(late_alloc_wave64 > 0) |
                                    S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1);
   shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, shader, false);

   oc_lds_en = shader->selector->info.stage == MESA_SHADER_TESS_EVAL ? 1 : 0;

   if (sscreen->info.chip_class >= GFX7) {
      ac_set_reg_cu_en(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
                       S_00B118_CU_EN(cu_mask) | S_00B118_WAVE_LIMIT(0x3F),
                       C_00B118_CU_EN, 0, &sscreen->info,
                       (void (*)(void*, unsigned, uint32_t))
                       (sscreen->info.chip_class >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
      si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(late_alloc_wave64));
   }

   si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
   si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS,
                  S_00B124_MEM_BASE(sscreen->info.address32_hi >> 8));

   uint32_t rsrc1 =
      S_00B128_VGPRS((shader->config.num_vgprs - 1) / (shader->wave_size == 32 ? 8 : 4)) |
      S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) | S_00B128_DX10_CLAMP(1) |
      S_00B128_MEM_ORDERED(si_shader_mem_ordered(shader)) |
      S_00B128_FLOAT_MODE(shader->config.float_mode);
   uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) | S_00B12C_OC_LDS_EN(oc_lds_en) |
                    S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

   if (sscreen->info.chip_class >= GFX10)
      rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
   else if (sscreen->info.chip_class == GFX9)
      rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);

   if (sscreen->info.chip_class <= GFX9)
      rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8);

   if (!sscreen->use_ngg_streamout) {
      rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
               S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
               S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
               S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
               S_00B12C_SO_EN(!!shader->selector->so.num_outputs);
   }

   si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1);
   si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2);

   if (window_space)
      shader->ctx_reg.vs.pa_cl_vte_cntl = S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
   else
      shader->ctx_reg.vs.pa_cl_vte_cntl =
         S_028818_VTX_W0_FMT(1) | S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
         S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
         S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);

   if (shader->selector->info.stage == MESA_SHADER_TESS_EVAL)
      si_set_tesseval_regs(sscreen, shader->selector, shader);

   polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader);
}

static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
   struct si_shader_info *info = &ps->selector->info;
   unsigned num_colors = !!(info->colors_read & 0x0f) + !!(info->colors_read & 0xf0);
   unsigned num_interp =
      ps->selector->info.num_inputs + (ps->key.ps.part.prolog.color_two_side ? num_colors : 0);

   assert(num_interp <= 32);
   return MIN2(num_interp, 32);
}

static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
   unsigned spi_shader_col_format = shader->key.ps.part.epilog.spi_shader_col_format;
   unsigned value = 0, num_mrts = 0;
   unsigned i, num_targets = (util_last_bit(spi_shader_col_format) + 3) / 4;

   /* Remove holes in spi_shader_col_format. */
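   /* For example, a (hypothetical) value of 0x505, i.e. MRT0 and MRT2
    * enabled with MRT1 unused, compacts to 0x55 with num_mrts = 2.
    */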
   for (i = 0; i < num_targets; i++) {
      unsigned spi_format = (spi_shader_col_format >> (i * 4)) & 0xf;

      if (spi_format) {
         value |= spi_format << (num_mrts * 4);
         num_mrts++;
      }
   }

   return value;
}

static void si_emit_shader_ps(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.ps;
   if (!shader)
      return;

   radeon_begin(&sctx->gfx_cs);
   /* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
   radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA, SI_TRACKED_SPI_PS_INPUT_ENA,
                               shader->ctx_reg.ps.spi_ps_input_ena,
                               shader->ctx_reg.ps.spi_ps_input_addr);

   radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL, SI_TRACKED_SPI_BARYC_CNTL,
                              shader->ctx_reg.ps.spi_baryc_cntl);
   radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL, SI_TRACKED_SPI_PS_IN_CONTROL,
                              shader->ctx_reg.ps.spi_ps_in_control);

   /* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
   radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT, SI_TRACKED_SPI_SHADER_Z_FORMAT,
                               shader->ctx_reg.ps.spi_shader_z_format,
                               shader->ctx_reg.ps.spi_shader_col_format);

   radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK, SI_TRACKED_CB_SHADER_MASK,
                              shader->ctx_reg.ps.cb_shader_mask);
   radeon_end_update_context_roll(sctx);
}

static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_shader_info *info = &shader->selector->info;
   struct si_pm4_state *pm4;
   unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
   unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
   uint64_t va;
   unsigned input_ena = shader->config.spi_ps_input_ena;

   /* We need to enable at least one of them, otherwise we hang the GPU. */
   assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) || G_0286CC_PERSP_CENTER_ENA(input_ena) ||
          G_0286CC_PERSP_CENTROID_ENA(input_ena) || G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
          G_0286CC_LINEAR_SAMPLE_ENA(input_ena) || G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
          G_0286CC_LINEAR_CENTROID_ENA(input_ena) || G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
   /* POS_W_FLOAT_ENA requires one of the perspective weights. */
   assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) || G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
          G_0286CC_PERSP_CENTER_ENA(input_ena) || G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
          G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

   /* Validate interpolation optimization flags (read as implications). */
   assert(!shader->key.ps.part.prolog.bc_optimize_for_persp ||
          (G_0286CC_PERSP_CENTER_ENA(input_ena) && G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.ps.part.prolog.bc_optimize_for_linear ||
          (G_0286CC_LINEAR_CENTER_ENA(input_ena) && G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
   assert(!shader->key.ps.part.prolog.force_persp_center_interp ||
          (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) && !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.ps.part.prolog.force_linear_center_interp ||
          (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) && !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
   assert(!shader->key.ps.part.prolog.force_persp_sample_interp ||
          (!G_0286CC_PERSP_CENTER_ENA(input_ena) && !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.ps.part.prolog.force_linear_sample_interp ||
          (!G_0286CC_LINEAR_CENTER_ENA(input_ena) && !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

   /* Validate cases when the optimizations are off (read as implications). */
   assert(shader->key.ps.part.prolog.bc_optimize_for_persp ||
          !G_0286CC_PERSP_CENTER_ENA(input_ena) || !G_0286CC_PERSP_CENTROID_ENA(input_ena));
   assert(shader->key.ps.part.prolog.bc_optimize_for_linear ||
          !G_0286CC_LINEAR_CENTER_ENA(input_ena) || !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   /* If multiple state sets are allowed to be in a bin, break the batch on a new PS. */
   if (sscreen->dpbb_allowed &&
       (sscreen->pbb_context_states_per_bin > 1 ||
        sscreen->pbb_persistent_states_per_bin > 1)) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   pm4->atom.emit = si_emit_shader_ps;

   /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
    * Possible values:
    * 0 -> Position = pixel center
    * 1 -> Position = pixel centroid
    * 2 -> Position = at sample position
    *
    * From GLSL 4.5 specification, section 7.1:
    * "The variable gl_FragCoord is available as an input variable from
    * within fragment shaders and it holds the window relative coordinates
    * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
    * value can be for any location within the pixel, or one of the
    * fragment samples. The use of centroid does not further restrict
    * this value to be inside the current primitive."
    *
    * Meaning that centroid has no effect and we can return anything within
    * the pixel. Thus, return the value at sample position, because that's
    * the most accurate one shaders can get.
    */
   spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

   if (info->base.fs.pixel_center_integer)
      spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

   spi_shader_col_format = si_get_spi_shader_col_format(shader);
   cb_shader_mask = ac_get_cb_shader_mask(shader->key.ps.part.epilog.spi_shader_col_format);

   /* Ensure that some export memory is always allocated, for two reasons:
    *
    * 1) Correctness: The hardware ignores the EXEC mask if no export
    *    memory is allocated, so KILL and alpha test do not work correctly
    *    without this.
    * 2) Performance: Every shader needs at least a NULL export, even when
    *    it writes no color/depth output. The NULL export instruction
    *    stalls without this setting.
    *
    * Don't add this to CB_SHADER_MASK.
    *
    * GFX10 supports pixel shaders without exports by setting both
    * the color and Z formats to SPI_SHADER_ZERO. The hw will skip export
    * instructions if any are present.
    */
   if ((sscreen->info.chip_class <= GFX9 || info->base.fs.uses_discard ||
        shader->key.ps.part.epilog.alpha_func != PIPE_FUNC_ALWAYS) &&
       !spi_shader_col_format && !info->writes_z && !info->writes_stencil &&
       !info->writes_samplemask)
      spi_shader_col_format = V_028714_SPI_SHADER_32_R;
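   /* Example: a fragment shader with no color/depth/stencil/samplemask
    * writes that uses discard gets a dummy 32_R export here so that export
    * memory is allocated; on GFX10+ without discard or alpha test, the
    * formats can stay zero and the hw skips the exports.
    */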

   shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
   shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;

   unsigned num_interp = si_get_ps_num_interp(shader);

   /* Set interpolation controls. */
   spi_ps_in_control = S_0286D8_NUM_INTERP(num_interp) |
                       S_0286D8_PS_W32_EN(shader->wave_size == 32);

   shader->ctx_reg.ps.num_interp = num_interp;
   shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
   shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
   shader->ctx_reg.ps.spi_shader_z_format =
      ac_get_spi_shader_z_format(info->writes_z, info->writes_stencil, info->writes_samplemask);
   shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
   shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;

   va = shader->bo->gpu_address;
   si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
   si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS,
                  S_00B024_MEM_BASE(sscreen->info.address32_hi >> 8));

   uint32_t rsrc1 =
      S_00B028_VGPRS((shader->config.num_vgprs - 1) / (shader->wave_size == 32 ? 8 : 4)) |
      S_00B028_DX10_CLAMP(1) | S_00B028_MEM_ORDERED(si_shader_mem_ordered(shader)) |
      S_00B028_FLOAT_MODE(shader->config.float_mode);

   if (sscreen->info.chip_class < GFX10) {
      rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8);
   }

   si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1);
   si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
                  S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
                     S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
                     S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

static void si_shader_init_pm4_state(struct si_screen *sscreen, struct si_shader *shader)
{
   assert(shader->wave_size);

   switch (shader->selector->info.stage) {
   case MESA_SHADER_VERTEX:
      if (shader->key.ge.as_ls)
         si_shader_ls(sscreen, shader);
      else if (shader->key.ge.as_es)
         si_shader_es(sscreen, shader);
      else if (shader->key.ge.as_ngg)
         gfx10_shader_ngg(sscreen, shader);
      else
         si_shader_vs(sscreen, shader, NULL);
      break;
   case MESA_SHADER_TESS_CTRL:
      si_shader_hs(sscreen, shader);
      break;
   case MESA_SHADER_TESS_EVAL:
      if (shader->key.ge.as_es)
         si_shader_es(sscreen, shader);
      else if (shader->key.ge.as_ngg)
         gfx10_shader_ngg(sscreen, shader);
      else
         si_shader_vs(sscreen, shader, NULL);
      break;
   case MESA_SHADER_GEOMETRY:
      if (shader->key.ge.as_ngg) {
         gfx10_shader_ngg(sscreen, shader);
      } else {
         /* VS must be initialized first because GS uses its fields. */
         si_shader_vs(sscreen, shader->gs_copy_shader, shader->selector);
         si_shader_gs(sscreen, shader);
      }
      break;
   case MESA_SHADER_FRAGMENT:
      si_shader_ps(sscreen, shader);
      break;
   default:
      assert(0);
   }
}

static void si_clear_vs_key_inputs(struct si_context *sctx, union si_shader_key *key,
                                   struct si_vs_prolog_bits *prolog_key)
{
   prolog_key->instance_divisor_is_one = 0;
   prolog_key->instance_divisor_is_fetched = 0;
   key->ge.mono.vs_fetch_opencode = 0;
   memset(key->ge.mono.vs_fix_fetch, 0, sizeof(key->ge.mono.vs_fix_fetch));
}

void si_vs_key_update_inputs(struct si_context *sctx)
{
   struct si_shader_selector *vs = sctx->shader.vs.cso;
   struct si_vertex_elements *elts = sctx->vertex_elements;
   union si_shader_key *key = &sctx->shader.vs.key;

   if (!vs)
      return;

   if (vs->info.base.vs.blit_sgprs_amd) {
      si_clear_vs_key_inputs(sctx, key, &key->ge.part.vs.prolog);
      key->ge.opt.prefer_mono = 0;
      sctx->uses_nontrivial_vs_prolog = false;
      return;
   }

   bool uses_nontrivial_vs_prolog = false;

   if (elts->instance_divisor_is_one || elts->instance_divisor_is_fetched)
      uses_nontrivial_vs_prolog = true;

   key->ge.part.vs.prolog.instance_divisor_is_one = elts->instance_divisor_is_one;
   key->ge.part.vs.prolog.instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
   key->ge.opt.prefer_mono = elts->instance_divisor_is_fetched;

   unsigned count_mask = (1 << vs->info.num_inputs) - 1;
   unsigned fix = elts->fix_fetch_always & count_mask;
   unsigned opencode = elts->fix_fetch_opencode & count_mask;

   if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) {
      uint32_t mask = elts->fix_fetch_unaligned & count_mask;
      while (mask) {
         unsigned i = u_bit_scan(&mask);
         unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1);
         unsigned vbidx = elts->vertex_buffer_index[i];
         struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx];
         unsigned align_mask = (1 << log_hw_load_size) - 1;
         if (vb->buffer_offset & align_mask || vb->stride & align_mask) {
            fix |= 1 << i;
            opencode |= 1 << i;
         }
      }
   }
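   /* Example: a dword-loaded attribute (log_hw_load_size = 2) bound at
    * buffer_offset = 2 fails the 4-byte alignment check above, so its
    * fetch gets both fixed up and opencoded in the prolog.
    */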

   memset(key->ge.mono.vs_fix_fetch, 0, sizeof(key->ge.mono.vs_fix_fetch));

   while (fix) {
      unsigned i = u_bit_scan(&fix);
      uint8_t fix_fetch = elts->fix_fetch[i];

      key->ge.mono.vs_fix_fetch[i].bits = fix_fetch;
      if (fix_fetch)
         uses_nontrivial_vs_prolog = true;
   }
   key->ge.mono.vs_fetch_opencode = opencode;
   if (opencode)
      uses_nontrivial_vs_prolog = true;

   sctx->uses_nontrivial_vs_prolog = uses_nontrivial_vs_prolog;

   /* draw_vertex_state (display lists) requires a trivial VS prolog that ignores
    * the current vertex buffers and vertex elements.
    *
    * We just computed the prolog key because we needed to set uses_nontrivial_vs_prolog,
    * so that we know whether the VS prolog should be updated when we switch from
    * draw_vertex_state to draw_vbo. Now clear the VS prolog for draw_vertex_state.
    * This should happen rarely because the VS prolog should be trivial in most
    * cases.
    */
   if (uses_nontrivial_vs_prolog && sctx->force_trivial_vs_prolog)
      si_clear_vs_key_inputs(sctx, key, &key->ge.part.vs.prolog);
}

void si_get_vs_key_inputs(struct si_context *sctx, union si_shader_key *key,
                          struct si_vs_prolog_bits *prolog_key)
{
   prolog_key->instance_divisor_is_one = sctx->shader.vs.key.ge.part.vs.prolog.instance_divisor_is_one;
   prolog_key->instance_divisor_is_fetched = sctx->shader.vs.key.ge.part.vs.prolog.instance_divisor_is_fetched;

   key->ge.mono.vs_fetch_opencode = sctx->shader.vs.key.ge.mono.vs_fetch_opencode;
   memcpy(key->ge.mono.vs_fix_fetch, sctx->shader.vs.key.ge.mono.vs_fix_fetch,
          sizeof(key->ge.mono.vs_fix_fetch));
}

void si_update_ps_inputs_read_or_disabled(struct si_context *sctx)
{
   struct si_shader_selector *ps = sctx->shader.ps.cso;

   /* Find out if PS is disabled. */
   bool ps_disabled = true;
   if (ps) {
      bool ps_modifies_zs = ps->info.base.fs.uses_discard || ps->info.writes_z ||
                            ps->info.writes_stencil || ps->info.writes_samplemask ||
                            sctx->queued.named.blend->alpha_to_coverage ||
                            sctx->queued.named.dsa->alpha_func != PIPE_FUNC_ALWAYS;
      unsigned ps_colormask = si_get_total_colormask(sctx);

      ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
                    (!ps_colormask && !ps_modifies_zs && !ps->info.base.writes_memory);
   }

   sctx->ps_inputs_read_or_disabled = ps_disabled ? 0 : ps->inputs_read;
}

static void si_get_vs_key_outputs(struct si_context *sctx, struct si_shader_selector *vs,
                                  union si_shader_key *key)
{
   key->ge.opt.kill_clip_distances = vs->clipdist_mask &
                                     ~sctx->queued.named.rasterizer->clip_plane_enable;

   /* Find out which VS outputs aren't used by the PS. */
   uint64_t outputs_written = vs->outputs_written_before_ps;
   uint64_t linked = outputs_written & sctx->ps_inputs_read_or_disabled;

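   /* Example: if the VS writes COLOR0 and TEXCOORD0 but the PS only reads
    * TEXCOORD0, COLOR0 stays set in kill_outputs below and the code
    * computing it can be eliminated from this variant.
    */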
   key->ge.opt.kill_outputs = ~linked & outputs_written;
   key->ge.opt.ngg_culling = sctx->ngg_culling;
   key->ge.mono.u.vs_export_prim_id = vs->info.stage != MESA_SHADER_GEOMETRY &&
                                      sctx->shader.ps.cso && sctx->shader.ps.cso->info.uses_primid;
   key->ge.opt.kill_pointsize = vs->info.writes_psize &&
                                sctx->current_rast_prim != PIPE_PRIM_POINTS &&
                                !sctx->queued.named.rasterizer->polygon_mode_is_points;
}

static void si_clear_vs_key_outputs(struct si_context *sctx, struct si_shader_selector *vs,
                                    union si_shader_key *key)
{
   key->ge.opt.kill_clip_distances = 0;
   key->ge.opt.kill_outputs = 0;
   key->ge.opt.ngg_culling = 0;
   key->ge.mono.u.vs_export_prim_id = 0;
   key->ge.opt.kill_pointsize = 0;
}

void si_ps_key_update_framebuffer(struct si_context *sctx)
{
   struct si_shader_selector *sel = sctx->shader.ps.cso;
   union si_shader_key *key = &sctx->shader.ps.key;

   if (!sel)
      return;

   if (sel->info.color0_writes_all_cbufs &&
       sel->info.colors_written == 0x1)
      key->ps.part.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
   else
      key->ps.part.epilog.last_cbuf = 0;

   /* ps_uses_fbfetch is true only if the color buffer is bound. */
   if (sctx->ps_uses_fbfetch) {
      struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
      struct pipe_resource *tex = cb0->texture;

      /* 1D textures are allocated and used as 2D on GFX9. */
      key->ps.mono.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
      key->ps.mono.fbfetch_is_1D =
         sctx->chip_class != GFX9 &&
         (tex->target == PIPE_TEXTURE_1D || tex->target == PIPE_TEXTURE_1D_ARRAY);
      key->ps.mono.fbfetch_layered =
         tex->target == PIPE_TEXTURE_1D_ARRAY || tex->target == PIPE_TEXTURE_2D_ARRAY ||
         tex->target == PIPE_TEXTURE_CUBE || tex->target == PIPE_TEXTURE_CUBE_ARRAY ||
         tex->target == PIPE_TEXTURE_3D;
   } else {
      key->ps.mono.fbfetch_msaa = 0;
      key->ps.mono.fbfetch_is_1D = 0;
      key->ps.mono.fbfetch_layered = 0;
   }
}

void si_ps_key_update_framebuffer_blend(struct si_context *sctx)
{
   struct si_shader_selector *sel = sctx->shader.ps.cso;
   union si_shader_key *key = &sctx->shader.ps.key;
   struct si_state_blend *blend = sctx->queued.named.blend;

   if (!sel)
      return;

   /* Select the shader color format based on whether
    * blending or alpha are needed.
    */
   key->ps.part.epilog.spi_shader_col_format =
      (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
       sctx->framebuffer.spi_shader_col_format_blend_alpha) |
      (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
       sctx->framebuffer.spi_shader_col_format_blend) |
      (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
       sctx->framebuffer.spi_shader_col_format_alpha) |
      (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
       sctx->framebuffer.spi_shader_col_format);
   key->ps.part.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;

   /* The output for dual source blending should have
    * the same format as the first output.
    */
   if (blend->dual_src_blend) {
      key->ps.part.epilog.spi_shader_col_format |=
         (key->ps.part.epilog.spi_shader_col_format & 0xf) << 4;
   }

   /* If alpha-to-coverage is enabled, we have to export alpha
    * even if there is no color buffer.
    */
   if (!(key->ps.part.epilog.spi_shader_col_format & 0xf) && blend->alpha_to_coverage)
      key->ps.part.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

   /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
    * to the range supported by the type if a channel has less
    * than 16 bits and the export format is 16_ABGR.
    */
   if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
      key->ps.part.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
      key->ps.part.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
   }

   /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
   if (!key->ps.part.epilog.last_cbuf) {
      key->ps.part.epilog.spi_shader_col_format &= sel->colors_written_4bit;
      key->ps.part.epilog.color_is_int8 &= sel->info.colors_written;
      key->ps.part.epilog.color_is_int10 &= sel->info.colors_written;
   }

   /* Eliminate shader code computing output values that are unused.
    * This enables dead code elimination between shader parts.
    * Check if any output is eliminated.
    */
   if (sel->colors_written_4bit &
       ~(sctx->framebuffer.colorbuf_enabled_4bit & blend->cb_target_enabled_4bit))
      key->ps.opt.prefer_mono = 1;
   else
      key->ps.opt.prefer_mono = 0;
}

void si_ps_key_update_blend_rasterizer(struct si_context *sctx)
{
   union si_shader_key *key = &sctx->shader.ps.key;
   struct si_state_blend *blend = sctx->queued.named.blend;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

   key->ps.part.epilog.alpha_to_one = blend->alpha_to_one && rs->multisample_enable;
}

void si_ps_key_update_rasterizer(struct si_context *sctx)
{
   struct si_shader_selector *sel = sctx->shader.ps.cso;
   union si_shader_key *key = &sctx->shader.ps.key;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

   if (!sel)
      return;

   key->ps.part.prolog.color_two_side = rs->two_side && sel->info.colors_read;
   key->ps.part.prolog.flatshade_colors = rs->flatshade && sel->info.uses_interp_color;
   key->ps.part.epilog.clamp_color = rs->clamp_fragment_color;
}

void si_ps_key_update_dsa(struct si_context *sctx)
{
   union si_shader_key *key = &sctx->shader.ps.key;

   key->ps.part.epilog.alpha_func = sctx->queued.named.dsa->alpha_func;
}

static void si_ps_key_update_primtype_shader_rasterizer_framebuffer(struct si_context *sctx)
{
   union si_shader_key *key = &sctx->shader.ps.key;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

   bool is_poly = !util_prim_is_points_or_lines(sctx->current_rast_prim);
   bool is_line = util_prim_is_lines(sctx->current_rast_prim);

   key->ps.part.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
   key->ps.mono.poly_line_smoothing =
      ((is_poly && rs->poly_smooth) || (is_line && rs->line_smooth)) &&
      sctx->framebuffer.nr_samples <= 1;
}

void si_ps_key_update_sample_shading(struct si_context *sctx)
{
   struct si_shader_selector *sel = sctx->shader.ps.cso;
   union si_shader_key *key = &sctx->shader.ps.key;

   if (!sel)
      return;

   if (sctx->ps_iter_samples > 1 && sel->info.reads_samplemask)
      key->ps.part.prolog.samplemask_log_ps_iter = util_logbase2(sctx->ps_iter_samples);
   else
      key->ps.part.prolog.samplemask_log_ps_iter = 0;
}

void si_ps_key_update_framebuffer_rasterizer_sample_shading(struct si_context *sctx)
{
   struct si_shader_selector *sel = sctx->shader.ps.cso;
   union si_shader_key *key = &sctx->shader.ps.key;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

   if (!sel)
      return;

   bool uses_persp_center = sel->info.uses_persp_center ||
                            (!rs->flatshade && sel->info.uses_persp_center_color);
   bool uses_persp_centroid = sel->info.uses_persp_centroid ||
                              (!rs->flatshade && sel->info.uses_persp_centroid_color);
   bool uses_persp_sample = sel->info.uses_persp_sample ||
                            (!rs->flatshade && sel->info.uses_persp_sample_color);

   if (rs->force_persample_interp && rs->multisample_enable &&
       sctx->framebuffer.nr_samples > 1 && sctx->ps_iter_samples > 1) {
      key->ps.part.prolog.force_persp_sample_interp =
         uses_persp_center || uses_persp_centroid;

      key->ps.part.prolog.force_linear_sample_interp =
         sel->info.uses_linear_center || sel->info.uses_linear_centroid;

      key->ps.part.prolog.force_persp_center_interp = 0;
      key->ps.part.prolog.force_linear_center_interp = 0;
      key->ps.part.prolog.bc_optimize_for_persp = 0;
      key->ps.part.prolog.bc_optimize_for_linear = 0;
      key->ps.mono.interpolate_at_sample_force_center = 0;
   } else if (rs->multisample_enable && sctx->framebuffer.nr_samples > 1) {
      key->ps.part.prolog.force_persp_sample_interp = 0;
      key->ps.part.prolog.force_linear_sample_interp = 0;
      key->ps.part.prolog.force_persp_center_interp = 0;
      key->ps.part.prolog.force_linear_center_interp = 0;
      key->ps.part.prolog.bc_optimize_for_persp =
         uses_persp_center && uses_persp_centroid;
      key->ps.part.prolog.bc_optimize_for_linear =
         sel->info.uses_linear_center && sel->info.uses_linear_centroid;
      key->ps.mono.interpolate_at_sample_force_center = 0;
   } else {
      key->ps.part.prolog.force_persp_sample_interp = 0;
      key->ps.part.prolog.force_linear_sample_interp = 0;

      /* Make sure SPI doesn't compute more than 1 pair
       * of (i,j), which is the optimization here. */
      key->ps.part.prolog.force_persp_center_interp = uses_persp_center +
                                                      uses_persp_centroid +
                                                      uses_persp_sample > 1;
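      /* The bools are summed as integers: the forced-center path is taken
       * only when more than one of center/centroid/sample would otherwise
       * need its own (i,j) pair.
       */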

      key->ps.part.prolog.force_linear_center_interp = sel->info.uses_linear_center +
                                                       sel->info.uses_linear_centroid +
                                                       sel->info.uses_linear_sample > 1;
      key->ps.part.prolog.bc_optimize_for_persp = 0;
      key->ps.part.prolog.bc_optimize_for_linear = 0;
      key->ps.mono.interpolate_at_sample_force_center = sel->info.uses_interp_at_sample;
   }
}

/* Compute the key for the hw shader variant. */
static inline void si_shader_selector_key(struct pipe_context *ctx, struct si_shader_selector *sel,
                                          union si_shader_key *key)
{
   struct si_context *sctx = (struct si_context *)ctx;

   switch (sel->info.stage) {
   case MESA_SHADER_VERTEX:
      if (!sctx->shader.tes.cso && !sctx->shader.gs.cso)
         si_get_vs_key_outputs(sctx, sel, key);
      else
         si_clear_vs_key_outputs(sctx, sel, key);
      break;
   case MESA_SHADER_TESS_CTRL:
      if (sctx->chip_class >= GFX9) {
         si_get_vs_key_inputs(sctx, key, &key->ge.part.tcs.ls_prolog);
         key->ge.part.tcs.ls = sctx->shader.vs.cso;
      }
      break;
   case MESA_SHADER_TESS_EVAL:
      if (!sctx->shader.gs.cso)
         si_get_vs_key_outputs(sctx, sel, key);
      else
         si_clear_vs_key_outputs(sctx, sel, key);
      break;
   case MESA_SHADER_GEOMETRY:
      if (sctx->chip_class >= GFX9) {
         if (sctx->shader.tes.cso) {
            si_clear_vs_key_inputs(sctx, key, &key->ge.part.gs.vs_prolog);
            key->ge.part.gs.es = sctx->shader.tes.cso;
         } else {
            si_get_vs_key_inputs(sctx, key, &key->ge.part.gs.vs_prolog);
            key->ge.part.gs.es = sctx->shader.vs.cso;
         }

         /* Only NGG can eliminate GS outputs, because the code is shared with VS. */
         if (sctx->ngg)
            si_get_vs_key_outputs(sctx, sel, key);
         else
            si_clear_vs_key_outputs(sctx, sel, key);
      }
      break;
   case MESA_SHADER_FRAGMENT:
      si_ps_key_update_primtype_shader_rasterizer_framebuffer(sctx);
      break;
   default:
      assert(0);
   }
}

static void si_build_shader_variant(struct si_shader *shader, int thread_index, bool low_priority)
{
   struct si_shader_selector *sel = shader->selector;
   struct si_screen *sscreen = sel->screen;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;

   if (thread_index >= 0) {
      if (low_priority) {
         assert(thread_index < (int)ARRAY_SIZE(sscreen->compiler_lowp));
         compiler = &sscreen->compiler_lowp[thread_index];
      } else {
         assert(thread_index < (int)ARRAY_SIZE(sscreen->compiler));
         compiler = &sscreen->compiler[thread_index];
      }
      if (!debug->async)
         debug = NULL;
   } else {
      assert(!low_priority);
      compiler = shader->compiler_ctx_state.compiler;
   }

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   if (unlikely(!si_create_shader_variant(sscreen, compiler, shader, debug))) {
      PRINT_ERR("Failed to build shader variant (type=%u)\n", sel->info.stage);
      shader->compilation_failed = true;
      return;
   }

   if (shader->compiler_ctx_state.is_debug_context) {
      FILE *f = open_memstream(&shader->shader_log, &shader->shader_log_size);
      if (f) {
         si_shader_dump(sscreen, shader, NULL, f, false);
         fclose(f);
      }
   }

   si_shader_init_pm4_state(sscreen, shader);
}

static void si_build_shader_variant_low_priority(void *job, void *gdata, int thread_index)
{
   struct si_shader *shader = (struct si_shader *)job;

   assert(thread_index >= 0);

   si_build_shader_variant(shader, thread_index, true);
}

/* This should be const, but C++ doesn't allow implicit zero-initialization with const. */
static union si_shader_key zeroed;

static bool si_check_missing_main_part(struct si_screen *sscreen, struct si_shader_selector *sel,
                                       struct si_compiler_ctx_state *compiler_state,
                                       const union si_shader_key *key)
{
   struct si_shader **mainp = si_get_main_shader_part(sel, key);

   if (!*mainp) {
      struct si_shader *main_part = CALLOC_STRUCT(si_shader);

      if (!main_part)
         return false;

      /* We can leave the fence as permanently signaled because the
       * main part becomes visible globally only after it has been
       * compiled. */
      util_queue_fence_init(&main_part->ready);

      main_part->selector = sel;
      if (sel->info.stage <= MESA_SHADER_GEOMETRY) {
         main_part->key.ge.as_es = key->ge.as_es;
         main_part->key.ge.as_ls = key->ge.as_ls;
         main_part->key.ge.as_ngg = key->ge.as_ngg;
      }
      main_part->is_monolithic = false;
      main_part->wave_size = si_determine_wave_size(sscreen, main_part);

      if (!si_compile_shader(sscreen, compiler_state->compiler, main_part,
                             &compiler_state->debug)) {
         FREE(main_part);
         return false;
      }
      *mainp = main_part;
   }
   return true;
}

/* A helper to copy *key to *local_key and return local_key. */
template<typename SHADER_KEY_TYPE>
static ALWAYS_INLINE const SHADER_KEY_TYPE *
use_local_key_copy(const SHADER_KEY_TYPE *key, SHADER_KEY_TYPE *local_key, unsigned key_size)
{
   if (key != local_key)
      memcpy(local_key, key, key_size);

   return local_key;
}

#define NO_INLINE_UNIFORMS false

/**
 * Select a shader variant according to the shader key.
 *
 * \param optimized_or_none If the key describes an optimized shader variant and
 *                          the compilation isn't finished, don't select any
 *                          shader and return an error.
 *
 * This uses a C++ template to compute the optimal memcmp size at compile time, which is important
 * for getting inlined memcmp. The memcmp size depends on the shader key type and whether inlined
 * uniforms are enabled.
 */
template<bool INLINE_UNIFORMS = true, typename SHADER_KEY_TYPE>
static int si_shader_select_with_key(struct si_context *sctx, struct si_shader_ctx_state *state,
                                     const SHADER_KEY_TYPE *key, int thread_index,
                                     bool optimized_or_none)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_shader_selector *sel = state->cso;
   struct si_shader_selector *previous_stage_sel = NULL;
   struct si_shader *current = state->current;
   struct si_shader *iter, *shader = NULL;
   const SHADER_KEY_TYPE *zeroed_key = (SHADER_KEY_TYPE*)&zeroed;

   /* "opt" must be the last field and "inlined_uniform_values" must be the last field inside opt.
    * If there is padding, insert the padding manually before opt or inside opt.
    */
   STATIC_ASSERT(offsetof(SHADER_KEY_TYPE, opt) + sizeof(key->opt) == sizeof(*key));
   STATIC_ASSERT(offsetof(SHADER_KEY_TYPE, opt.inlined_uniform_values) +
                 sizeof(key->opt.inlined_uniform_values) == sizeof(*key));

   const unsigned key_size_no_uniforms = sizeof(*key) - sizeof(key->opt.inlined_uniform_values);
   /* Don't compare inlined_uniform_values if uniform inlining is disabled. */
   const unsigned key_size = INLINE_UNIFORMS ? sizeof(*key) : key_size_no_uniforms;
   const unsigned key_opt_size =
      INLINE_UNIFORMS ? sizeof(key->opt) :
                        sizeof(key->opt) - sizeof(key->opt.inlined_uniform_values);
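   /* In effect: with uniform inlining enabled the whole key is compared;
    * with it disabled, the trailing MAX_INLINABLE_UNIFORMS * 4 bytes of
    * inlined values are sliced off both sizes, and since both are
    * compile-time constants, the memcmp calls below can be fully inlined.
    */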

   /* si_shader_select_with_key must not modify 'key' because it would affect future shaders.
    * If we need to modify it for this specific shader (eg: to disable optimizations), we
    * use a copy.
    */
   SHADER_KEY_TYPE local_key;

   if (unlikely(sscreen->debug_flags & DBG(NO_OPT_VARIANT))) {
      /* Disable shader variant optimizations. */
      key = use_local_key_copy<SHADER_KEY_TYPE>(key, &local_key, key_size);
      memset(&local_key.opt, 0, key_opt_size);
   }

again:
   /* Check if we don't need to change anything.
    * This path is also used for most shaders that don't need multiple
    * variants, it will cost just a computation of the key and this
    * test. */
   if (likely(current && memcmp(&current->key, key, key_size) == 0)) {
      if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
         if (current->is_optimized) {
            if (optimized_or_none)
               return -1;

            key = use_local_key_copy(key, &local_key, key_size);
            memset(&local_key.opt, 0, key_opt_size);
            goto current_not_ready;
         }

         util_queue_fence_wait(&current->ready);
      }

      return current->compilation_failed ? -1 : 0;
   }
current_not_ready:

   /* This must be done before the mutex is locked, because async GS
    * compilation calls this function too, and therefore must enter
    * the mutex first.
    *
    * Only wait if we are in a draw call. Don't wait if we are
    * in a compiler thread.
    */
   if (thread_index < 0)
      util_queue_fence_wait(&sel->ready);

   simple_mtx_lock(&sel->mutex);

   int variant_count = 0;
   const int max_inline_uniforms_variants = 5;

   /* Find the shader variant. */
   for (iter = sel->first_variant; iter; iter = iter->next_variant) {
      const SHADER_KEY_TYPE *iter_key = (const SHADER_KEY_TYPE *)&iter->key;

      if (memcmp(iter_key, key, key_size_no_uniforms) == 0) {
         /* Check the inlined uniform values separately, and count
          * the number of variants based on them.
          */
         if (key->opt.inline_uniforms &&
             memcmp(iter_key->opt.inlined_uniform_values,
                    key->opt.inlined_uniform_values,
                    MAX_INLINABLE_UNIFORMS * 4) != 0) {
            if (variant_count++ > max_inline_uniforms_variants) {
               key = use_local_key_copy(key, &local_key, key_size);
               /* Too many variants. Disable inlining for this shader. */
               local_key.opt.inline_uniforms = 0;
               memset(local_key.opt.inlined_uniform_values, 0, MAX_INLINABLE_UNIFORMS * 4);
               simple_mtx_unlock(&sel->mutex);
               goto again;
            }
            continue;
         }

         simple_mtx_unlock(&sel->mutex);

         if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
            /* If it's an optimized shader and its compilation has
             * been started but isn't done, use the unoptimized
             * shader so as not to cause a stall due to compilation.
             */
            if (iter->is_optimized) {
               if (optimized_or_none)
                  return -1;

               key = use_local_key_copy(key, &local_key, key_size);
               memset(&local_key.opt, 0, key_opt_size);
               goto again;
            }

            util_queue_fence_wait(&iter->ready);
         }

         if (iter->compilation_failed) {
            return -1; /* skip the draw call */
         }

         state->current = iter;
         return 0;
      }
   }

   /* Build a new shader. */
   shader = CALLOC_STRUCT(si_shader);
   if (!shader) {
      simple_mtx_unlock(&sel->mutex);
      return -ENOMEM;
   }

   util_queue_fence_init(&shader->ready);

   if (!sctx->compiler.passes)
      si_init_compiler(sctx->screen, &sctx->compiler);

   shader->selector = sel;
   *((SHADER_KEY_TYPE*)&shader->key) = *key;
   shader->wave_size = si_determine_wave_size(sscreen, shader);
   shader->compiler_ctx_state.compiler = &sctx->compiler;
   shader->compiler_ctx_state.debug = sctx->debug;
   shader->compiler_ctx_state.is_debug_context = sctx->is_debug;

   /* If this is a merged shader, get the first shader's selector. */
   if (sscreen->info.chip_class >= GFX9) {
      if (sel->info.stage == MESA_SHADER_TESS_CTRL)
         previous_stage_sel = ((struct si_shader_key_ge*)key)->part.tcs.ls;
      else if (sel->info.stage == MESA_SHADER_GEOMETRY)
         previous_stage_sel = ((struct si_shader_key_ge*)key)->part.gs.es;

      /* We need to wait for the previous shader. */
      if (previous_stage_sel && thread_index < 0)
         util_queue_fence_wait(&previous_stage_sel->ready);
   }

   bool is_pure_monolithic =
      sscreen->use_monolithic_shaders ||
      memcmp(&key->mono, &zeroed_key->mono, sizeof(key->mono)) != 0;
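   /* A variant is "pure monolithic" when the screen forces monolithic
    * compiles or any mono key field is set (e.g. a non-trivial vs_fix_fetch),
    * in which case it is compiled as one unit with no separate
    * prolog/epilog parts.
    */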
2649
2650 /* Compile the main shader part if it doesn't exist. This can happen
2651 * if the initial guess was wrong.
2652 */
2653 if (!is_pure_monolithic) {
2654 bool ok = true;
2655
2656 /* Make sure the main shader part is present. This is needed
2657 * for shaders that can be compiled as VS, LS, or ES, and only
2658 * one of them is compiled at creation.
2659 *
2660 * It is also needed for GS, which can be compiled as non-NGG
2661 * and NGG.
2662 *
2663 * For merged shaders, check that the starting shader's main
2664 * part is present.
2665 */
2666 if (previous_stage_sel) {
2667 union si_shader_key shader1_key = zeroed;
2668
2669 if (sel->info.stage == MESA_SHADER_TESS_CTRL) {
2670 shader1_key.ge.as_ls = 1;
2671 } else if (sel->info.stage == MESA_SHADER_GEOMETRY) {
2672 shader1_key.ge.as_es = 1;
2673 shader1_key.ge.as_ngg = ((struct si_shader_key_ge*)key)->as_ngg; /* for Wave32 vs Wave64 */
2674 } else {
2675 assert(0);
2676 }
2677
2678 simple_mtx_lock(&previous_stage_sel->mutex);
2679 ok = si_check_missing_main_part(sscreen, previous_stage_sel, &shader->compiler_ctx_state,
2680 &shader1_key);
2681 simple_mtx_unlock(&previous_stage_sel->mutex);
2682 }
2683
2684 if (ok) {
2685 ok = si_check_missing_main_part(sscreen, sel, &shader->compiler_ctx_state,
2686 (union si_shader_key*)key);
2687 }
2688
2689 if (!ok) {
2690 FREE(shader);
2691 simple_mtx_unlock(&sel->mutex);
2692 return -ENOMEM; /* skip the draw call */
2693 }
2694 }
2695
2696 /* Keep the reference to the 1st shader of merged shaders, so that
2697 * Gallium can't destroy it before we destroy the 2nd shader.
2698 *
2699 * Set sctx = NULL, because it's unused if we're not releasing
2700 * the shader, and we don't have any sctx here.
2701 */
2702 si_shader_selector_reference(NULL, &shader->previous_stage_sel, previous_stage_sel);
2703
2704 /* Monolithic-only shaders don't make a distinction between optimized
2705 * and unoptimized. */
2706 shader->is_monolithic =
2707 is_pure_monolithic || memcmp(&key->opt, &zeroed_key->opt, key_opt_size) != 0;
2708
2709 shader->is_optimized = !is_pure_monolithic &&
2710 memcmp(&key->opt, &zeroed_key->opt, key_opt_size) != 0;
2711
2712 /* If it's an optimized shader, compile it asynchronously. */
2713 if (shader->is_optimized && thread_index < 0) {
2714 /* Compile it asynchronously. */
2715 util_queue_add_job(&sscreen->shader_compiler_queue_low_priority, shader, &shader->ready,
2716 si_build_shader_variant_low_priority, NULL, 0);
2717
2718 /* Add only after the ready fence was reset, to guard against a
2719 * race with si_bind_XX_shader. */
2720 if (!sel->last_variant) {
2721 sel->first_variant = shader;
2722 sel->last_variant = shader;
2723 } else {
2724 sel->last_variant->next_variant = shader;
2725 sel->last_variant = shader;
2726 }
2727
2728 /* Use the default (unoptimized) shader for now. */
2729 key = use_local_key_copy(key, &local_key, key_size);
2730 memset(&local_key.opt, 0, key_opt_size);
2731 simple_mtx_unlock(&sel->mutex);
2732
2733 if (sscreen->options.sync_compile)
2734 util_queue_fence_wait(&shader->ready);
2735
2736 if (optimized_or_none)
2737 return -1;
2738 goto again;
2739 }
2740
2741 /* Reset the fence before adding to the variant list. */
2742 util_queue_fence_reset(&shader->ready);
2743
2744 if (!sel->last_variant) {
2745 sel->first_variant = shader;
2746 sel->last_variant = shader;
2747 } else {
2748 sel->last_variant->next_variant = shader;
2749 sel->last_variant = shader;
2750 }
2751
2752 simple_mtx_unlock(&sel->mutex);
2753
2754 assert(!shader->is_optimized);
2755 si_build_shader_variant(shader, thread_index, false);
2756
2757 util_queue_fence_signal(&shader->ready);
2758
2759 if (!shader->compilation_failed)
2760 state->current = shader;
2761
2762 return shader->compilation_failed ? -1 : 0;
2763 }
2764
si_shader_select(struct pipe_context * ctx,struct si_shader_ctx_state * state)2765 int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state)
2766 {
2767 struct si_context *sctx = (struct si_context *)ctx;
2768
2769 si_shader_selector_key(ctx, state->cso, &state->key);
2770
2771 if (state->cso->info.stage == MESA_SHADER_FRAGMENT) {
2772 if (state->key.ps.opt.inline_uniforms)
2773 return si_shader_select_with_key(sctx, state, &state->key.ps, -1, false);
2774 else
2775 return si_shader_select_with_key<NO_INLINE_UNIFORMS>(sctx, state, &state->key.ps, -1, false);
2776 } else {
2777 if (state->key.ge.opt.inline_uniforms) {
2778 return si_shader_select_with_key(sctx, state, &state->key.ge, -1, false);
2779 } else {
2780 return si_shader_select_with_key<NO_INLINE_UNIFORMS>(sctx, state, &state->key.ge, -1, false);
2781 }
2782 }
2783 }
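
/* Usage sketch (illustrative only, not part of the original file): a draw
 * path selects a variant right before emitting shader state, e.g.
 *
 *    if (si_shader_select(ctx, &sctx->shader.ps) != 0)
 *       return; // compilation failed or the draw should be skipped
 *
 * On success (0), state->current points at a usable variant; negative
 * values (-1, -ENOMEM) mean the draw call must be dropped.
 */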
2784
2785 static void si_parse_next_shader_property(const struct si_shader_info *info, bool streamout,
2786 union si_shader_key *key)
2787 {
2788 gl_shader_stage next_shader = info->base.next_stage;
2789
2790 switch (info->stage) {
2791 case MESA_SHADER_VERTEX:
2792 switch (next_shader) {
2793 case MESA_SHADER_GEOMETRY:
2794 key->ge.as_es = 1;
2795 break;
2796 case MESA_SHADER_TESS_CTRL:
2797 case MESA_SHADER_TESS_EVAL:
2798 key->ge.as_ls = 1;
2799 break;
2800 default:
2801 /* If POSITION isn't written, it can only be a HW VS
2802 * if streamout is used. If streamout isn't used,
2803 * assume that it's a HW LS (the next shader is a TCS).
2804 * This heuristic is needed for separate shader objects.
2805 */
2806 if (!info->writes_position && !streamout)
2807 key->ge.as_ls = 1;
2808 }
2809 break;
2810
2811 case MESA_SHADER_TESS_EVAL:
2812 if (next_shader == MESA_SHADER_GEOMETRY || !info->writes_position)
2813 key->ge.as_es = 1;
2814 break;
2815
2816 default:;
2817 }
2818 }
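
/* Example (illustrative): a VS whose NIR reports next_stage ==
 * MESA_SHADER_GEOMETRY gets key->ge.as_es = 1 and is compiled as an ES
 * feeding the GS. A VS with an unknown next stage that writes POSITION
 * (or one that uses streamout) keeps as_ls == as_es == 0 and runs as a
 * HW VS.
 */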
2819
2820 /**
2821 * Compile the main shader part or the monolithic shader as part of
2822 * si_shader_selector initialization. Since it can be done asynchronously,
2823 * there is no way to report compile failures to applications.
2824 */
2825 static void si_init_shader_selector_async(void *job, void *gdata, int thread_index)
2826 {
2827 struct si_shader_selector *sel = (struct si_shader_selector *)job;
2828 struct si_screen *sscreen = sel->screen;
2829 struct ac_llvm_compiler *compiler;
2830 struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
2831
2832 assert(!debug->debug_message || debug->async);
2833 assert(thread_index >= 0);
2834 assert(thread_index < (int)ARRAY_SIZE(sscreen->compiler));
2835 compiler = &sscreen->compiler[thread_index];
2836
2837 if (!compiler->passes)
2838 si_init_compiler(sscreen, compiler);
2839
2840 /* Serialize NIR to save memory. Monolithic shader variants
2841 * have to deserialize NIR before compilation.
2842 */
2843 if (sel->nir) {
2844 struct blob blob;
2845 size_t size;
2846
2847 blob_init(&blob);
2848 /* true = remove optional debugging data to increase
2849 * the likelihood of getting more shader cache hits.
2850 * It also drops variable names, so we'll save more memory.
2851 */
2852 nir_serialize(&blob, sel->nir, true);
2853 blob_finish_get_buffer(&blob, &sel->nir_binary, &size);
2854 sel->nir_size = size;
2855 }
2856
2857 /* Compile the main shader part for use with a prolog and/or epilog.
2858 * If this fails, the driver will try to compile a monolithic shader
2859 * on demand.
2860 */
2861 if (!sscreen->use_monolithic_shaders) {
2862 struct si_shader *shader = CALLOC_STRUCT(si_shader);
2863 unsigned char ir_sha1_cache_key[20];
2864
2865 if (!shader) {
2866 fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
2867 return;
2868 }
2869
2870 /* We can leave the fence signaled because use of the default
2871 * main part is guarded by the selector's ready fence. */
2872 util_queue_fence_init(&shader->ready);
2873
2874 shader->selector = sel;
2875 shader->is_monolithic = false;
2876 si_parse_next_shader_property(&sel->info, sel->so.num_outputs != 0, &shader->key);
2877
2878 if (sel->info.stage <= MESA_SHADER_GEOMETRY &&
2879 sscreen->use_ngg && (!sel->so.num_outputs || sscreen->use_ngg_streamout) &&
2880 ((sel->info.stage == MESA_SHADER_VERTEX && !shader->key.ge.as_ls) ||
2881 sel->info.stage == MESA_SHADER_TESS_EVAL || sel->info.stage == MESA_SHADER_GEOMETRY))
2882 shader->key.ge.as_ngg = 1;
2883
2884 shader->wave_size = si_determine_wave_size(sscreen, shader);
2885
2886 if (sel->nir) {
2887 if (sel->info.stage <= MESA_SHADER_GEOMETRY) {
2888 si_get_ir_cache_key(sel, shader->key.ge.as_ngg, shader->key.ge.as_es,
2889 shader->wave_size, ir_sha1_cache_key);
2890 } else {
2891 si_get_ir_cache_key(sel, false, false, shader->wave_size, ir_sha1_cache_key);
2892 }
2893 }
2894
2895 /* Try to load the shader from the shader cache. */
2896 simple_mtx_lock(&sscreen->shader_cache_mutex);
2897
2898 if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
2899 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2900 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
2901 } else {
2902 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2903
2904 /* Compile the shader if it hasn't been loaded from the cache. */
2905 if (!si_compile_shader(sscreen, compiler, shader, debug)) {
2906 FREE(shader);
2907 fprintf(stderr, "radeonsi: can't compile a main shader part\n");
2908 return;
2909 }
2910
2911 simple_mtx_lock(&sscreen->shader_cache_mutex);
2912 si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, shader, true);
2913 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2914 }
2915
2916 *si_get_main_shader_part(sel, &shader->key) = shader;
2917
2918 /* Unset "outputs_written" flags for outputs converted to
2919 * DEFAULT_VAL, so that later inter-shader optimizations don't
2920 * try to eliminate outputs that don't exist in the final
2921 * shader.
2922 *
2923 * This is only done if non-monolithic shaders are enabled.
2924 */
2925 if ((sel->info.stage == MESA_SHADER_VERTEX ||
2926 sel->info.stage == MESA_SHADER_TESS_EVAL ||
2927 sel->info.stage == MESA_SHADER_GEOMETRY) &&
2928 !shader->key.ge.as_ls && !shader->key.ge.as_es) {
2929 unsigned i;
2930
2931 for (i = 0; i < sel->info.num_outputs; i++) {
2932 unsigned semantic = sel->info.output_semantic[i];
2933 unsigned ps_input_cntl = shader->info.vs_output_ps_input_cntl[semantic];
2934
2935 /* OFFSET=0x20 means DEFAULT_VAL, which means VS doesn't export it. */
2936 if (G_028644_OFFSET(ps_input_cntl) != 0x20)
2937 continue;
2938
2939 unsigned id;
2940
2941 /* Remove the output from the mask. */
2942 if ((semantic <= VARYING_SLOT_VAR31 || semantic >= VARYING_SLOT_VAR0_16BIT) &&
2943 semantic != VARYING_SLOT_POS &&
2944 semantic != VARYING_SLOT_PSIZ &&
2945 semantic != VARYING_SLOT_CLIP_VERTEX &&
2946 semantic != VARYING_SLOT_EDGE) {
2947 id = si_shader_io_get_unique_index(semantic, true);
2948 sel->outputs_written_before_ps &= ~(1ull << id);
2949 }
2950 }
2951 }
2952 }
2953
2954 /* Free NIR. We only keep serialized NIR after this point. */
2955 if (sel->nir) {
2956 ralloc_free(sel->nir);
2957 sel->nir = NULL;
2958 }
2959 }
2960
2961 void si_schedule_initial_compile(struct si_context *sctx, gl_shader_stage stage,
2962 struct util_queue_fence *ready_fence,
2963 struct si_compiler_ctx_state *compiler_ctx_state, void *job,
2964 util_queue_execute_func execute)
2965 {
2966 util_queue_fence_init(ready_fence);
2967
2968 struct util_async_debug_callback async_debug;
2969 bool debug = (sctx->debug.debug_message && !sctx->debug.async) || sctx->is_debug ||
2970 si_can_dump_shader(sctx->screen, stage);
2971
2972 if (debug) {
2973 u_async_debug_init(&async_debug);
2974 compiler_ctx_state->debug = async_debug.base;
2975 }
2976
2977 util_queue_add_job(&sctx->screen->shader_compiler_queue, job, ready_fence, execute, NULL, 0);
2978
2979 if (debug) {
2980 util_queue_fence_wait(ready_fence);
2981 u_async_debug_drain(&async_debug, &sctx->debug);
2982 u_async_debug_cleanup(&async_debug);
2983 }
2984
2985 if (sctx->screen->options.sync_compile)
2986 util_queue_fence_wait(ready_fence);
2987 }
2988
2989 /* Return descriptor slot usage masks from the given shader info. */
2990 void si_get_active_slot_masks(const struct si_shader_info *info, uint64_t *const_and_shader_buffers,
2991 uint64_t *samplers_and_images)
2992 {
2993 unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers;
2994
2995 num_shaderbufs = info->base.num_ssbos;
2996 num_constbufs = info->base.num_ubos;
2997 /* two 8-byte images share one 16-byte slot */
2998 num_images = align(info->base.num_images, 2);
2999 num_msaa_images = align(util_last_bit(info->base.msaa_images), 2);
3000 num_samplers = BITSET_LAST_BIT(info->base.textures_used);
3001
3002 /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
3003 start = si_get_shaderbuf_slot(num_shaderbufs - 1);
3004 *const_and_shader_buffers = u_bit_consecutive64(start, num_shaderbufs + num_constbufs);
3005
3006 /* The layout is:
3007 * - fmask[last] ... fmask[0] go to [15-last .. 15]
3008 * - image[last] ... image[0] go to [31-last .. 31]
3009 * - sampler[0] ... sampler[last] go to [32 .. 32+last*2]
3010 *
3011 * FMASKs for images are placed separately, because MSAA images are rare,
3012 * and so we can benefit from a better cache hit rate if we keep image
3013 * descriptors together.
3014 */
3015 if (num_msaa_images)
3016 num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
3017
3018 start = si_get_image_slot(num_images - 1) / 2;
3019 *samplers_and_images = u_bit_consecutive64(start, num_images / 2 + num_samplers);
3020 }
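
/* Worked example (the counts are hypothetical): for a shader with
 * 2 SSBOs, 3 UBOs and no images, the first mask is computed as
 *
 *    start = si_get_shaderbuf_slot(1);  // sb[1] comes before sb[0]
 *    *const_and_shader_buffers = u_bit_consecutive64(start, 2 + 3);
 *
 * i.e. five consecutive bits covering sb[1], sb[0], cb[0], cb[1], cb[2]
 * per the layout comment above. Image counts are rounded up to an even
 * number because two image descriptors share one slot.
 */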
3021
3022 static void *si_create_shader_selector(struct pipe_context *ctx,
3023 const struct pipe_shader_state *state)
3024 {
3025 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
3026 struct si_context *sctx = (struct si_context *)ctx;
3027 struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
3028 int i;
3029
3030 if (!sel)
3031 return NULL;
3032
3033 sel->screen = sscreen;
3034 sel->compiler_ctx_state.debug = sctx->debug;
3035 sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
3036
3037 sel->so = state->stream_output;
3038
3039 if (state->type == PIPE_SHADER_IR_TGSI) {
3040 sel->nir = tgsi_to_nir(state->tokens, ctx->screen, true);
3041 } else {
3042 assert(state->type == PIPE_SHADER_IR_NIR);
3043 sel->nir = (nir_shader*)state->ir.nir;
3044 }
3045
3046 si_nir_scan_shader(sel->nir, &sel->info);
3047
3048 const enum pipe_shader_type type = pipe_shader_type_from_mesa(sel->info.stage);
3049 sel->pipe_shader_type = type;
3050 sel->const_and_shader_buf_descriptors_index =
3051 si_const_and_shader_buffer_descriptors_idx(type);
3052 sel->sampler_and_images_descriptors_index =
3053 si_sampler_and_image_descriptors_idx(type);
3054
3055 p_atomic_inc(&sscreen->num_shaders_created);
3056 si_get_active_slot_masks(&sel->info, &sel->active_const_and_shader_buffers,
3057 &sel->active_samplers_and_images);
3058
3059 /* Record which streamout buffers are enabled. */
3060 for (unsigned i = 0; i < sel->so.num_outputs; i++) {
3061 sel->enabled_streamout_buffer_mask |= (1 << sel->so.output[i].output_buffer)
3062 << (sel->so.output[i].stream * 4);
3063 }
3064
3065 sel->num_vs_inputs =
3066 sel->info.stage == MESA_SHADER_VERTEX && !sel->info.base.vs.blit_sgprs_amd
3067 ? sel->info.num_inputs
3068 : 0;
3069 unsigned num_vbos_in_sgprs = si_num_vbos_in_user_sgprs_inline(sscreen->info.chip_class);
3070 sel->num_vbos_in_user_sgprs = MIN2(sel->num_vs_inputs, num_vbos_in_sgprs);
3071
3072 /* The prolog is a no-op if there are no inputs. */
3073 sel->vs_needs_prolog = sel->info.stage == MESA_SHADER_VERTEX && sel->info.num_inputs &&
3074 !sel->info.base.vs.blit_sgprs_amd;
3075
3076 if (sel->info.stage == MESA_SHADER_VERTEX ||
3077 sel->info.stage == MESA_SHADER_TESS_CTRL ||
3078 sel->info.stage == MESA_SHADER_TESS_EVAL ||
3079 sel->info.stage == MESA_SHADER_GEOMETRY) {
3080 if (sel->info.stage == MESA_SHADER_TESS_CTRL) {
3081 /* Always reserve space for these. */
3082 sel->patch_outputs_written |=
3083 (1ull << si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_INNER)) |
3084 (1ull << si_shader_io_get_unique_index_patch(VARYING_SLOT_TESS_LEVEL_OUTER));
3085 }
3086 for (i = 0; i < sel->info.num_outputs; i++) {
3087 unsigned semantic = sel->info.output_semantic[i];
3088
3089 if (semantic == VARYING_SLOT_TESS_LEVEL_INNER ||
3090 semantic == VARYING_SLOT_TESS_LEVEL_OUTER ||
3091 (semantic >= VARYING_SLOT_PATCH0 && semantic < VARYING_SLOT_TESS_MAX)) {
3092 sel->patch_outputs_written |= 1ull << si_shader_io_get_unique_index_patch(semantic);
3093 } else if ((semantic <= VARYING_SLOT_VAR31 || semantic >= VARYING_SLOT_VAR0_16BIT) &&
3094 semantic != VARYING_SLOT_EDGE) {
3095 sel->outputs_written |= 1ull << si_shader_io_get_unique_index(semantic, false);
3096
3097 /* Ignore outputs that are not passed from VS to PS. */
3098 if (semantic != VARYING_SLOT_POS &&
3099 semantic != VARYING_SLOT_PSIZ &&
3100 semantic != VARYING_SLOT_CLIP_VERTEX) {
3101 sel->outputs_written_before_ps |= 1ull
3102 << si_shader_io_get_unique_index(semantic, true);
3103 }
3104 }
3105 }
3106 }
3107
3108 switch (sel->info.stage) {
3109 case MESA_SHADER_GEOMETRY:
3110 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
3111 sel->rast_prim = (enum pipe_prim_type)sel->info.base.gs.output_primitive;
3112 if (util_rast_prim_is_triangles(sel->rast_prim))
3113 sel->rast_prim = PIPE_PRIM_TRIANGLES;
3114
3115 sel->gsvs_vertex_size = sel->info.num_outputs * 16;
3116 sel->max_gsvs_emit_size = sel->gsvs_vertex_size * sel->info.base.gs.vertices_out;
3117 sel->gs_input_verts_per_prim =
3118 u_vertices_per_prim((enum pipe_prim_type)sel->info.base.gs.input_primitive);
3119
3120 /* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tessellation, so
3121 * we can't split workgroups. Disable NGG if any of the following conditions is true:
3122 * - num_invocations * gs.vertices_out > 256
3123 * - LDS usage is too high
3124 */
3125 sel->tess_turns_off_ngg = sscreen->info.chip_class >= GFX10 &&
3126 (sel->info.base.gs.invocations * sel->info.base.gs.vertices_out > 256 ||
3127 sel->info.base.gs.invocations * sel->info.base.gs.vertices_out *
3128 (sel->info.num_outputs * 4 + 1) > 6500 /* max dw per GS primitive */);
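      /* Worked example (hypothetical GS): invocations = 4 and
       * vertices_out = 96 give 4 * 96 = 384 > 256, so tess_turns_off_ngg
       * is set on GFX10+ and draws with tessellation enabled fall back
       * to legacy GS (see si_update_ngg below).
       */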
3129 break;
3130
3131 case MESA_SHADER_VERTEX:
3132 case MESA_SHADER_TESS_CTRL:
3133 case MESA_SHADER_TESS_EVAL:
3134 sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
3135 sel->lshs_vertex_stride = sel->esgs_itemsize;
3136
3137 /* Add 1 dword to reduce LDS bank conflicts, so that each vertex
3138 * will start on a different bank (except at the maximum stride of 32*16).
3139 */
3140 if (sel->lshs_vertex_stride < 32 * 16)
3141 sel->lshs_vertex_stride += 4;
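      /* Worked example (assuming 32 LDS banks of 4 bytes): with 4 vec4
       * outputs the stride would be 64 bytes (16 dwords), so vertex i
       * would start at bank (16*i) % 32 and hit only banks 0 and 16.
       * With the stride bumped to 68 bytes (17 dwords), vertex i starts
       * at bank (17*i) % 32, and since gcd(17, 32) == 1, consecutive
       * vertices cycle through all 32 banks.
       */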
3142
3143 /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
3144 * conflicts, i.e. each vertex will start at a different bank.
3145 */
3146 if (sctx->chip_class >= GFX9)
3147 sel->esgs_itemsize += 4;
3148
3149 assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
3150
3151 sel->tcs_vgpr_only_inputs = ~sel->info.base.tess.tcs_cross_invocation_inputs_read &
3152 ~sel->info.base.inputs_read_indirectly &
3153 sel->info.base.inputs_read;
3154
3155 /* Only for TES: */
3156 if (sel->info.stage == MESA_SHADER_TESS_EVAL) {
3157 if (sel->info.base.tess.point_mode)
3158 sel->rast_prim = PIPE_PRIM_POINTS;
3159 else if (sel->info.base.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES)
3160 sel->rast_prim = PIPE_PRIM_LINE_STRIP;
3161 else
3162 sel->rast_prim = PIPE_PRIM_TRIANGLES;
3163 } else {
3164 sel->rast_prim = PIPE_PRIM_TRIANGLES;
3165 }
3166 break;
3167
3168 case MESA_SHADER_FRAGMENT:
3169 for (i = 0; i < sel->info.num_inputs; i++) {
3170 unsigned semantic = sel->info.input[i].semantic;
3171
3172 if ((semantic <= VARYING_SLOT_VAR31 || semantic >= VARYING_SLOT_VAR0_16BIT) &&
3173 semantic != VARYING_SLOT_PNTC) {
3174 sel->inputs_read |= 1ull << si_shader_io_get_unique_index(semantic, true);
3175 }
3176 }
3177
3178 for (i = 0; i < 8; i++)
3179 if (sel->info.colors_written & (1 << i))
3180 sel->colors_written_4bit |= 0xf << (4 * i);
3181
3182 for (i = 0; i < sel->info.num_inputs; i++) {
3183 if (sel->info.input[i].semantic == VARYING_SLOT_COL0)
3184 sel->color_attr_index[0] = i;
3185 else if (sel->info.input[i].semantic == VARYING_SLOT_COL1)
3186 sel->color_attr_index[1] = i;
3187 }
3188 break;
3189 default:;
3190 }
3191
3192 bool ngg_culling_allowed =
3193 sscreen->info.chip_class >= GFX10 &&
3194 sscreen->use_ngg_culling &&
3195 sel->info.writes_position &&
3196 !sel->info.writes_viewport_index && /* cull only against viewport 0 */
3197 !sel->info.base.writes_memory &&
3198 /* NGG GS supports culling with streamout because it culls after streamout. */
3199 (sel->info.stage == MESA_SHADER_GEOMETRY || !sel->so.num_outputs) &&
3200 (sel->info.stage != MESA_SHADER_GEOMETRY || sel->info.num_stream_output_components[0]) &&
3201 (sel->info.stage != MESA_SHADER_VERTEX ||
3202 (!sel->info.base.vs.blit_sgprs_amd &&
3203 !sel->info.base.vs.window_space_position));
3204
3205 sel->ngg_cull_vert_threshold = UINT_MAX; /* disabled (changed below) */
3206
3207 if (ngg_culling_allowed) {
3208 if (sel->info.stage == MESA_SHADER_VERTEX) {
3209 if (sscreen->debug_flags & DBG(ALWAYS_NGG_CULLING_ALL))
3210 sel->ngg_cull_vert_threshold = 0; /* always enabled */
3211 else
3212 sel->ngg_cull_vert_threshold = 128;
3213 } else if (sel->info.stage == MESA_SHADER_TESS_EVAL ||
3214 sel->info.stage == MESA_SHADER_GEOMETRY) {
3215 if (sel->rast_prim != PIPE_PRIM_POINTS)
3216 sel->ngg_cull_vert_threshold = 0; /* always enabled */
3217 }
3218 }
3219
3220 sel->clipdist_mask = sel->info.writes_clipvertex ? SIX_BITS :
3221 u_bit_consecutive(0, sel->info.base.clip_distance_array_size);
3222 sel->culldist_mask = u_bit_consecutive(0, sel->info.base.cull_distance_array_size) <<
3223 sel->info.base.clip_distance_array_size;
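   /* Worked example (hypothetical shader): clip_distance_array_size = 2
    * and cull_distance_array_size = 2 yield clipdist_mask = 0x3 and
    * culldist_mask = 0xC, i.e. the cull distances occupy the slots right
    * after the clip distances. Writing gl_ClipVertex instead sets all
    * six clip bits (SIX_BITS).
    */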
3224
3225 /* DB_SHADER_CONTROL */
3226 sel->db_shader_control = S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
3227 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
3228 S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
3229 S_02880C_KILL_ENABLE(sel->info.base.fs.uses_discard);
3230
3231 if (sel->info.stage == MESA_SHADER_FRAGMENT) {
3232 switch (sel->info.base.fs.depth_layout) {
3233 case FRAG_DEPTH_LAYOUT_GREATER:
3234 sel->db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
3235 break;
3236 case FRAG_DEPTH_LAYOUT_LESS:
3237 sel->db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
3238 break;
3239 default:;
3240 }
3241
3242 /* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
3243 *
3244 * | early Z/S | writes_mem | allow_ReZ? | Z_ORDER | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
3245 * --|-----------|------------|------------|--------------------|-------------------|-------------
3246 * 1a| false | false | true | EarlyZ_Then_ReZ | 0 | 0
3247 * 1b| false | false | false | EarlyZ_Then_LateZ | 0 | 0
3248 * 2 | false | true | n/a | LateZ | 1 | 0
3249 * 3 | true | false | n/a | EarlyZ_Then_LateZ | 0 | 0
3250 * 4 | true | true | n/a | EarlyZ_Then_LateZ | 0 | 1
3251 *
3252 * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
3253 * In case 2, NOOP_CULL is a don't-care field. In cases 2, 3 and 4, ReZ doesn't make sense.
3254 *
3255 * Don't use ReZ without profiling !!!
3256 *
3257 * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
3258 * shaders.
3259 */
3260 if (sel->info.base.fs.early_fragment_tests) {
3261 /* Cases 3, 4. */
3262 sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
3263 S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
3264 S_02880C_EXEC_ON_NOOP(sel->info.base.writes_memory);
3265 } else if (sel->info.base.writes_memory) {
3266 /* Case 2. */
3267 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) | S_02880C_EXEC_ON_HIER_FAIL(1);
3268 } else {
3269 /* Case 1. */
3270 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
3271 }
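      /* Example (following the table above): a PS with
       * early_fragment_tests = true that also writes memory is case 4:
       * DEPTH_BEFORE_SHADER(1), Z_ORDER = EARLY_Z_THEN_LATE_Z and
       * EXEC_ON_NOOP(1), while EXEC_ON_HIER_FAIL stays 0.
       */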
3272
3273 if (sel->info.base.fs.post_depth_coverage)
3274 sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1);
3275 }
3276
3277 (void)simple_mtx_init(&sel->mutex, mtx_plain);
3278
3279 si_schedule_initial_compile(sctx, sel->info.stage, &sel->ready, &sel->compiler_ctx_state,
3280 sel, si_init_shader_selector_async);
3281 return sel;
3282 }
3283
3284 static void *si_create_shader(struct pipe_context *ctx, const struct pipe_shader_state *state)
3285 {
3286 struct si_context *sctx = (struct si_context *)ctx;
3287 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
3288 bool cache_hit;
3289 struct si_shader_selector *sel = (struct si_shader_selector *)util_live_shader_cache_get(
3290 ctx, &sscreen->live_shader_cache, state, &cache_hit);
3291
3292 if (sel && cache_hit && sctx->debug.debug_message) {
3293 if (sel->main_shader_part)
3294 si_shader_dump_stats_for_shader_db(sscreen, sel->main_shader_part, &sctx->debug);
3295 if (sel->main_shader_part_ls)
3296 si_shader_dump_stats_for_shader_db(sscreen, sel->main_shader_part_ls, &sctx->debug);
3297 if (sel->main_shader_part_es)
3298 si_shader_dump_stats_for_shader_db(sscreen, sel->main_shader_part_es, &sctx->debug);
3299 if (sel->main_shader_part_ngg)
3300 si_shader_dump_stats_for_shader_db(sscreen, sel->main_shader_part_ngg, &sctx->debug);
3301 if (sel->main_shader_part_ngg_es)
3302 si_shader_dump_stats_for_shader_db(sscreen, sel->main_shader_part_ngg_es, &sctx->debug);
3303 }
3304 return sel;
3305 }
3306
3307 static void si_update_streamout_state(struct si_context *sctx)
3308 {
3309 struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;
3310
3311 if (!shader_with_so)
3312 return;
3313
3314 sctx->streamout.enabled_stream_buffers_mask = shader_with_so->enabled_streamout_buffer_mask;
3315 sctx->streamout.stride_in_dw = shader_with_so->so.stride;
3316 }
3317
3318 static void si_update_clip_regs(struct si_context *sctx, struct si_shader_selector *old_hw_vs,
3319 struct si_shader *old_hw_vs_variant,
3320 struct si_shader_selector *next_hw_vs,
3321 struct si_shader *next_hw_vs_variant)
3322 {
3323 if (next_hw_vs &&
3324 (!old_hw_vs ||
3325 (old_hw_vs->info.stage == MESA_SHADER_VERTEX && old_hw_vs->info.base.vs.window_space_position) !=
3326 (next_hw_vs->info.stage == MESA_SHADER_VERTEX && next_hw_vs->info.base.vs.window_space_position) ||
3327 old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
3328 old_hw_vs->culldist_mask != next_hw_vs->culldist_mask || !old_hw_vs_variant ||
3329 !next_hw_vs_variant ||
3330 old_hw_vs_variant->pa_cl_vs_out_cntl != next_hw_vs_variant->pa_cl_vs_out_cntl))
3331 si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
3332 }
3333
3334 static void si_update_rasterized_prim(struct si_context *sctx)
3335 {
3336 enum pipe_prim_type rast_prim;
3337
3338 if (sctx->shader.gs.cso) {
3339 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
3340 rast_prim = sctx->shader.gs.cso->rast_prim;
3341 } else if (sctx->shader.tes.cso) {
3342 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
3343 rast_prim = sctx->shader.tes.cso->rast_prim;
3344 } else {
3345 /* Determined by draw calls. */
3346 return;
3347 }
3348
3349 if (rast_prim != sctx->current_rast_prim) {
3350 if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
3351 util_prim_is_points_or_lines(rast_prim))
3352 si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
3353
3354 sctx->current_rast_prim = rast_prim;
3355 }
3356 }
3357
3358 static void si_update_common_shader_state(struct si_context *sctx, struct si_shader_selector *sel,
3359 enum pipe_shader_type type)
3360 {
3361 si_set_active_descriptors_for_shader(sctx, sel);
3362
3363 sctx->uses_bindless_samplers = si_shader_uses_bindless_samplers(sctx->shader.vs.cso) ||
3364 si_shader_uses_bindless_samplers(sctx->shader.gs.cso) ||
3365 si_shader_uses_bindless_samplers(sctx->shader.ps.cso) ||
3366 si_shader_uses_bindless_samplers(sctx->shader.tcs.cso) ||
3367 si_shader_uses_bindless_samplers(sctx->shader.tes.cso);
3368 sctx->uses_bindless_images = si_shader_uses_bindless_images(sctx->shader.vs.cso) ||
3369 si_shader_uses_bindless_images(sctx->shader.gs.cso) ||
3370 si_shader_uses_bindless_images(sctx->shader.ps.cso) ||
3371 si_shader_uses_bindless_images(sctx->shader.tcs.cso) ||
3372 si_shader_uses_bindless_images(sctx->shader.tes.cso);
3373
3374 if (type == PIPE_SHADER_VERTEX || type == PIPE_SHADER_TESS_EVAL || type == PIPE_SHADER_GEOMETRY)
3375 sctx->ngg_culling = 0; /* this will be enabled on the first draw if needed */
3376
3377 si_invalidate_inlinable_uniforms(sctx, type);
3378 sctx->do_update_shaders = true;
3379 }
3380
3381 static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
3382 {
3383 struct si_context *sctx = (struct si_context *)ctx;
3384 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3385 struct si_shader *old_hw_vs_variant = si_get_vs(sctx)->current;
3386 struct si_shader_selector *sel = (struct si_shader_selector*)state;
3387
3388 if (sctx->shader.vs.cso == sel)
3389 return;
3390
3391 sctx->shader.vs.cso = sel;
3392 sctx->shader.vs.current = sel ? sel->first_variant : NULL;
3393 sctx->num_vs_blit_sgprs = sel ? sel->info.base.vs.blit_sgprs_amd : 0;
3394 sctx->vs_uses_draw_id = sel ? sel->info.uses_drawid : false;
3395 sctx->fixed_func_tcs_shader.key.ge.mono.u.ff_tcs_inputs_to_copy = sel ? sel->outputs_written : 0;
3396
3397 if (si_update_ngg(sctx))
3398 si_shader_change_notify(sctx);
3399
3400 si_update_common_shader_state(sctx, sel, PIPE_SHADER_VERTEX);
3401 si_select_draw_vbo(sctx);
3402 si_update_vs_viewport_state(sctx);
3403 si_update_streamout_state(sctx);
3404 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant, si_get_vs(sctx)->cso,
3405 si_get_vs(sctx)->current);
3406 si_update_rasterized_prim(sctx);
3407 si_vs_key_update_inputs(sctx);
3408
3409 if (sctx->screen->dpbb_allowed) {
3410 bool force_off = sel && sel->info.options & SI_PROFILE_VS_NO_BINNING;
3411
3412 if (force_off != sctx->dpbb_force_off_profile_vs) {
3413 sctx->dpbb_force_off_profile_vs = force_off;
3414 si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
3415 }
3416 }
3417 }
3418
3419 static void si_update_tess_uses_prim_id(struct si_context *sctx)
3420 {
3421 sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
3422 (sctx->shader.tes.cso && sctx->shader.tes.cso->info.uses_primid) ||
3423 (sctx->shader.tcs.cso && sctx->shader.tcs.cso->info.uses_primid) ||
3424 (sctx->shader.gs.cso && sctx->shader.gs.cso->info.uses_primid) ||
3425 (sctx->shader.ps.cso && !sctx->shader.gs.cso && sctx->shader.ps.cso->info.uses_primid);
3426 }
3427
3428 bool si_update_ngg(struct si_context *sctx)
3429 {
3430 if (!sctx->screen->use_ngg) {
3431 assert(!sctx->ngg);
3432 return false;
3433 }
3434
3435 bool new_ngg = true;
3436
3437 if (sctx->shader.gs.cso && sctx->shader.tes.cso && sctx->shader.gs.cso->tess_turns_off_ngg) {
3438 new_ngg = false;
3439 } else if (!sctx->screen->use_ngg_streamout) {
3440 struct si_shader_selector *last = si_get_vs(sctx)->cso;
3441
3442 if ((last && last->so.num_outputs) || sctx->streamout.prims_gen_query_enabled)
3443 new_ngg = false;
3444 }
3445
3446 if (new_ngg != sctx->ngg) {
3447 /* Transitioning from NGG to legacy GS requires VGT_FLUSH on Navi10-14.
3448 * VGT_FLUSH is also emitted at the beginning of IBs when legacy GS ring
3449 * pointers are set.
3450 */
3451 if (sctx->screen->info.has_vgt_flush_ngg_legacy_bug && !new_ngg) {
3452 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
3453 if (sctx->chip_class == GFX10) {
3454 /* Workaround for https://gitlab.freedesktop.org/mesa/mesa/-/issues/2941 */
3455 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
3456 }
3457 }
3458
3459 sctx->ngg = new_ngg;
3460 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3461 si_select_draw_vbo(sctx);
3462 return true;
3463 }
3464 return false;
3465 }
3466
3467 static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
3468 {
3469 struct si_context *sctx = (struct si_context *)ctx;
3470 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3471 struct si_shader *old_hw_vs_variant = si_get_vs(sctx)->current;
3472 struct si_shader_selector *sel = (struct si_shader_selector*)state;
3473 bool enable_changed = !!sctx->shader.gs.cso != !!sel;
3474 bool ngg_changed;
3475
3476 if (sctx->shader.gs.cso == sel)
3477 return;
3478
3479 sctx->shader.gs.cso = sel;
3480 sctx->shader.gs.current = sel ? sel->first_variant : NULL;
3481 sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
3482
3483 si_update_common_shader_state(sctx, sel, PIPE_SHADER_GEOMETRY);
3484 si_select_draw_vbo(sctx);
3485 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3486
3487 ngg_changed = si_update_ngg(sctx);
3488 if (ngg_changed || enable_changed)
3489 si_shader_change_notify(sctx);
3490 if (enable_changed) {
3491 if (sctx->ia_multi_vgt_param_key.u.uses_tess)
3492 si_update_tess_uses_prim_id(sctx);
3493 }
3494 si_update_vs_viewport_state(sctx);
3495 si_update_streamout_state(sctx);
3496 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant, si_get_vs(sctx)->cso,
3497 si_get_vs(sctx)->current);
3498 si_update_rasterized_prim(sctx);
3499 }
3500
3501 static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
3502 {
3503 struct si_context *sctx = (struct si_context *)ctx;
3504 struct si_shader_selector *sel = (struct si_shader_selector*)state;
3505 bool enable_changed = !!sctx->shader.tcs.cso != !!sel;
3506
3507 if (sctx->shader.tcs.cso == sel)
3508 return;
3509
3510 sctx->shader.tcs.cso = sel;
3511 sctx->shader.tcs.current = sel ? sel->first_variant : NULL;
3512 sctx->shader.tcs.key.ge.part.tcs.epilog.invoc0_tess_factors_are_def =
3513 sel ? sel->info.tessfactors_are_def_in_all_invocs : 0;
3514 si_update_tess_uses_prim_id(sctx);
3515
3516 si_update_common_shader_state(sctx, sel, PIPE_SHADER_TESS_CTRL);
3517
3518 if (enable_changed)
3519 sctx->last_tcs = NULL; /* invalidate derived tess state */
3520 }
3521
3522 static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
3523 {
3524 struct si_context *sctx = (struct si_context *)ctx;
3525 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3526 struct si_shader *old_hw_vs_variant = si_get_vs(sctx)->current;
3527 struct si_shader_selector *sel = (struct si_shader_selector*)state;
3528 bool enable_changed = !!sctx->shader.tes.cso != !!sel;
3529
3530 if (sctx->shader.tes.cso == sel)
3531 return;
3532
3533 sctx->shader.tes.cso = sel;
3534 sctx->shader.tes.current = sel ? sel->first_variant : NULL;
3535 sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
3536 si_update_tess_uses_prim_id(sctx);
3537
3538 sctx->shader.tcs.key.ge.part.tcs.epilog.prim_mode =
3539 sctx->fixed_func_tcs_shader.key.ge.part.tcs.epilog.prim_mode =
3540 sel ? sel->info.base.tess._primitive_mode : 0;
3541
3542 sctx->shader.tcs.key.ge.part.tcs.epilog.tes_reads_tess_factors =
3543 sctx->fixed_func_tcs_shader.key.ge.part.tcs.epilog.tes_reads_tess_factors =
3544 sel ? sel->info.reads_tess_factors : 0;
3545
3546 si_update_common_shader_state(sctx, sel, PIPE_SHADER_TESS_EVAL);
3547 si_select_draw_vbo(sctx);
3548 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3549
3550 bool ngg_changed = si_update_ngg(sctx);
3551 if (ngg_changed || enable_changed)
3552 si_shader_change_notify(sctx);
3553 if (enable_changed)
3554 sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
3555 si_update_vs_viewport_state(sctx);
3556 si_update_streamout_state(sctx);
3557 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant, si_get_vs(sctx)->cso,
3558 si_get_vs(sctx)->current);
3559 si_update_rasterized_prim(sctx);
3560 }
3561
3562 void si_update_ps_kill_enable(struct si_context *sctx)
3563 {
3564 if (!sctx->shader.ps.cso)
3565 return;
3566
3567 unsigned db_shader_control = sctx->shader.ps.cso->db_shader_control |
3568 S_02880C_KILL_ENABLE(sctx->queued.named.dsa->alpha_func != PIPE_FUNC_ALWAYS);
3569
3570 if (sctx->ps_db_shader_control != db_shader_control) {
3571 sctx->ps_db_shader_control = db_shader_control;
3572 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
3573 if (sctx->screen->dpbb_allowed)
3574 si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
3575 }
3576 }
3577
3578 void si_update_vrs_flat_shading(struct si_context *sctx)
3579 {
3580 if (sctx->chip_class >= GFX10_3 && sctx->shader.ps.cso) {
3581 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
3582 struct si_shader_info *info = &sctx->shader.ps.cso->info;
3583 bool allow_flat_shading = info->allow_flat_shading;
3584
3585 if (allow_flat_shading &&
3586 (rs->line_smooth || rs->poly_smooth || rs->poly_stipple_enable ||
3587 (!rs->flatshade && info->uses_interp_color)))
3588 allow_flat_shading = false;
3589
3590 if (sctx->allow_flat_shading != allow_flat_shading) {
3591 sctx->allow_flat_shading = allow_flat_shading;
3592 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
3593 }
3594 }
3595 }
3596
3597 static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
3598 {
3599 struct si_context *sctx = (struct si_context *)ctx;
3600 struct si_shader_selector *old_sel = sctx->shader.ps.cso;
3601 struct si_shader_selector *sel = (struct si_shader_selector*)state;
3602
3603 /* skip if supplied shader is one already in use */
3604 if (old_sel == sel)
3605 return;
3606
3607 sctx->shader.ps.cso = sel;
3608 sctx->shader.ps.current = sel ? sel->first_variant : NULL;
3609
3610 si_update_common_shader_state(sctx, sel, PIPE_SHADER_FRAGMENT);
3611 if (sel) {
3612 if (sctx->ia_multi_vgt_param_key.u.uses_tess)
3613 si_update_tess_uses_prim_id(sctx);
3614
3615 if (!old_sel || old_sel->info.colors_written != sel->info.colors_written)
3616 si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
3617
3618 if (sctx->screen->has_out_of_order_rast &&
3619 (!old_sel || old_sel->info.base.writes_memory != sel->info.base.writes_memory ||
3620 old_sel->info.base.fs.early_fragment_tests !=
3621 sel->info.base.fs.early_fragment_tests))
3622 si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
3623 }
3624 si_update_ps_colorbuf0_slot(sctx);
3625
3626 si_ps_key_update_framebuffer(sctx);
3627 si_ps_key_update_framebuffer_blend(sctx);
3628 si_ps_key_update_blend_rasterizer(sctx);
3629 si_ps_key_update_rasterizer(sctx);
3630 si_ps_key_update_dsa(sctx);
3631 si_ps_key_update_sample_shading(sctx);
3632 si_ps_key_update_framebuffer_rasterizer_sample_shading(sctx);
3633 si_update_ps_inputs_read_or_disabled(sctx);
3634 si_update_ps_kill_enable(sctx);
3635 si_update_vrs_flat_shading(sctx);
3636
3637 if (sctx->screen->dpbb_allowed) {
3638 bool force_off = sel && sel->info.options & SI_PROFILE_PS_NO_BINNING;
3639
3640 if (force_off != sctx->dpbb_force_off_profile_ps) {
3641 sctx->dpbb_force_off_profile_ps = force_off;
3642 si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
3643 }
3644 }
3645 }
3646
3647 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
3648 {
3649 if (shader->is_optimized) {
3650 util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority, &shader->ready);
3651 }
3652
3653 util_queue_fence_destroy(&shader->ready);
3654
3655 /* If destroyed shaders were not unbound, the next compiled
3656 * shader variant could get the same pointer address and so
3657 * binding it to the same shader stage would be considered
3658 * a no-op, causing random behavior.
3659 */
3660 int state_index = -1;
3661
3662 switch (shader->selector->info.stage) {
3663 case MESA_SHADER_VERTEX:
3664 if (shader->key.ge.as_ls) {
3665 if (sctx->chip_class <= GFX8)
3666 state_index = SI_STATE_IDX(ls);
3667 } else if (shader->key.ge.as_es) {
3668 if (sctx->chip_class <= GFX8)
3669 state_index = SI_STATE_IDX(es);
3670 } else if (shader->key.ge.as_ngg) {
3671 state_index = SI_STATE_IDX(gs);
3672 } else {
3673 state_index = SI_STATE_IDX(vs);
3674 }
3675 break;
3676 case MESA_SHADER_TESS_CTRL:
3677 state_index = SI_STATE_IDX(hs);
3678 break;
3679 case MESA_SHADER_TESS_EVAL:
3680 if (shader->key.ge.as_es) {
3681 if (sctx->chip_class <= GFX8)
3682 state_index = SI_STATE_IDX(es);
3683 } else if (shader->key.ge.as_ngg) {
3684 state_index = SI_STATE_IDX(gs);
3685 } else {
3686 state_index = SI_STATE_IDX(vs);
3687 }
3688 break;
3689 case MESA_SHADER_GEOMETRY:
3690 if (shader->is_gs_copy_shader)
3691 state_index = SI_STATE_IDX(vs);
3692 else
3693 state_index = SI_STATE_IDX(gs);
3694 break;
3695 case MESA_SHADER_FRAGMENT:
3696 state_index = SI_STATE_IDX(ps);
3697 break;
3698 default:;
3699 }
3700
3701 if (shader->gs_copy_shader)
3702 si_delete_shader(sctx, shader->gs_copy_shader);
3703
3704 si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
3705 si_shader_destroy(shader);
3706 si_pm4_free_state(sctx, &shader->pm4, state_index);
3707 }
3708
3709 static void si_destroy_shader_selector(struct pipe_context *ctx, void *cso)
3710 {
3711 struct si_context *sctx = (struct si_context *)ctx;
3712 struct si_shader_selector *sel = (struct si_shader_selector *)cso;
3713 struct si_shader *p = sel->first_variant, *c;
3714 enum pipe_shader_type type = pipe_shader_type_from_mesa(sel->info.stage);
3715
3716 util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);
3717
3718 if (sctx->shaders[type].cso == sel) {
3719 sctx->shaders[type].cso = NULL;
3720 sctx->shaders[type].current = NULL;
3721 }
3722
3723 while (p) {
3724 c = p->next_variant;
3725 si_delete_shader(sctx, p);
3726 p = c;
3727 }
3728
3729 if (sel->main_shader_part)
3730 si_delete_shader(sctx, sel->main_shader_part);
3731 if (sel->main_shader_part_ls)
3732 si_delete_shader(sctx, sel->main_shader_part_ls);
3733 if (sel->main_shader_part_es)
3734 si_delete_shader(sctx, sel->main_shader_part_es);
3735 if (sel->main_shader_part_ngg)
3736 si_delete_shader(sctx, sel->main_shader_part_ngg);
3737
3738 util_queue_fence_destroy(&sel->ready);
3739 simple_mtx_destroy(&sel->mutex);
3740 ralloc_free(sel->nir);
3741 free(sel->nir_binary);
3742 free(sel);
3743 }
3744
3745 static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
3746 {
3747 struct si_context *sctx = (struct si_context *)ctx;
3748 struct si_shader_selector *sel = (struct si_shader_selector *)state;
3749
3750 si_shader_selector_reference(sctx, &sel, NULL);
3751 }
3752
3753 /**
3754 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
3755 */
3756 static void si_cs_preamble_add_vgt_flush(struct si_context *sctx)
3757 {
3758 /* We shouldn't get here if registers are shadowed. */
3759 assert(!sctx->shadowed_regs);
3760
3761 if (sctx->cs_preamble_has_vgt_flush)
3762 return;
3763
3764 /* Done by Vulkan before VGT_FLUSH. */
3765 si_pm4_cmd_add(sctx->cs_preamble_state, PKT3(PKT3_EVENT_WRITE, 0, 0));
3766 si_pm4_cmd_add(sctx->cs_preamble_state, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
3767
3768 /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
3769 si_pm4_cmd_add(sctx->cs_preamble_state, PKT3(PKT3_EVENT_WRITE, 0, 0));
3770 si_pm4_cmd_add(sctx->cs_preamble_state, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
3771 sctx->cs_preamble_has_vgt_flush = true;
3772 }
3773
3774 /**
3775 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
3776 */
3777 static void si_emit_vgt_flush(struct radeon_cmdbuf *cs)
3778 {
3779 radeon_begin(cs);
3780
3781 /* This is required before VGT_FLUSH. */
3782 radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
3783 radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
3784
3785 /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
3786 radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
3787 radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
3788 radeon_end();
3789 }
3790
3791 /* Initialize state related to ESGS / GSVS ring buffers */
3792 bool si_update_gs_ring_buffers(struct si_context *sctx)
3793 {
3794 struct si_shader_selector *es =
3795 sctx->shader.tes.cso ? sctx->shader.tes.cso : sctx->shader.vs.cso;
3796 struct si_shader_selector *gs = sctx->shader.gs.cso;
3797 struct si_pm4_state *pm4;
3798
3799 /* Chip constants. */
3800 unsigned num_se = sctx->screen->info.max_se;
3801 unsigned wave_size = 64;
3802 unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
3803 /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
3804 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
3805 */
3806 unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
3807 unsigned alignment = 256 * num_se;
3808 /* The maximum size is 63.999 MB per SE. */
3809 unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
3810
3811 /* Calculate the minimum size. */
3812 unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse * wave_size, alignment);
3813
3814 /* These are recommended sizes, not minimum sizes. */
3815 unsigned esgs_ring_size =
3816 max_gs_waves * 2 * wave_size * es->esgs_itemsize * gs->gs_input_verts_per_prim;
3817 unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size * gs->max_gsvs_emit_size;
3818
3819 min_esgs_ring_size = align(min_esgs_ring_size, alignment);
3820 esgs_ring_size = align(esgs_ring_size, alignment);
3821 gsvs_ring_size = align(gsvs_ring_size, alignment);
3822
3823 esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
3824 gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
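   /* Worked example (hypothetical chip with num_se = 4): max_gs_waves =
    * 128, so with esgs_itemsize = 64 and gs_input_verts_per_prim = 3 the
    * recommended ESGS size is 128 * 2 * 64 * 64 * 3 = 3 MiB before
    * alignment, well below the ~63.999 MB-per-SE cap above.
    */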
3825
3826 /* Some rings don't have to be allocated if shaders don't use them.
3827 * (e.g. no varyings between ES and GS or GS and VS)
3828 *
3829 * GFX9 doesn't have the ESGS ring.
3830 */
3831 bool update_esgs = sctx->chip_class <= GFX8 && esgs_ring_size &&
3832 (!sctx->esgs_ring || sctx->esgs_ring->width0 < esgs_ring_size);
3833 bool update_gsvs =
3834 gsvs_ring_size && (!sctx->gsvs_ring || sctx->gsvs_ring->width0 < gsvs_ring_size);
3835
3836 if (!update_esgs && !update_gsvs)
3837 return true;
3838
3839 if (update_esgs) {
3840 pipe_resource_reference(&sctx->esgs_ring, NULL);
3841 sctx->esgs_ring =
3842 pipe_aligned_buffer_create(sctx->b.screen,
3843 PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
3844 PIPE_USAGE_DEFAULT,
3845 esgs_ring_size, sctx->screen->info.pte_fragment_size);
3846 if (!sctx->esgs_ring)
3847 return false;
3848 }
3849
3850 if (update_gsvs) {
3851 pipe_resource_reference(&sctx->gsvs_ring, NULL);
3852 sctx->gsvs_ring =
3853 pipe_aligned_buffer_create(sctx->b.screen,
3854 PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
3855 PIPE_USAGE_DEFAULT,
3856 gsvs_ring_size, sctx->screen->info.pte_fragment_size);
3857 if (!sctx->gsvs_ring)
3858 return false;
3859 }
3860
3861 /* Set ring bindings. */
3862 if (sctx->esgs_ring) {
3863 assert(sctx->chip_class <= GFX8);
3864 si_set_ring_buffer(sctx, SI_RING_ESGS, sctx->esgs_ring, 0, sctx->esgs_ring->width0, false,
3865 false, 0, 0, 0);
3866 }
3867 if (sctx->gsvs_ring) {
3868 si_set_ring_buffer(sctx, SI_RING_GSVS, sctx->gsvs_ring, 0, sctx->gsvs_ring->width0, false,
3869 false, 0, 0, 0);
3870 }
3871
3872 if (sctx->shadowed_regs) {
3873 /* These registers will be shadowed, so set them only once. */
3874 struct radeon_cmdbuf *cs = &sctx->gfx_cs;
3875
3876 assert(sctx->chip_class >= GFX7);
3877
3878 si_emit_vgt_flush(cs);
3879
3880 radeon_begin(cs);
3881
3882 /* Set the GS registers. */
3883 if (sctx->esgs_ring) {
3884 assert(sctx->chip_class <= GFX8);
3885 radeon_set_uconfig_reg(R_030900_VGT_ESGS_RING_SIZE,
3886 sctx->esgs_ring->width0 / 256);
3887 }
3888 if (sctx->gsvs_ring) {
3889 radeon_set_uconfig_reg(R_030904_VGT_GSVS_RING_SIZE,
3890 sctx->gsvs_ring->width0 / 256);
3891 }
3892 radeon_end();
3893 return true;
3894 }
3895
3896 /* The codepath without register shadowing. */
3897 /* Create the "cs_preamble_gs_rings" state. */
3898 pm4 = CALLOC_STRUCT(si_pm4_state);
3899 if (!pm4)
3900 return false;
3901
3902 if (sctx->chip_class >= GFX7) {
3903 if (sctx->esgs_ring) {
3904 assert(sctx->chip_class <= GFX8);
3905 si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE, sctx->esgs_ring->width0 / 256);
3906 }
3907 if (sctx->gsvs_ring)
3908 si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE, sctx->gsvs_ring->width0 / 256);
3909 } else {
3910 if (sctx->esgs_ring)
3911 si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE, sctx->esgs_ring->width0 / 256);
3912 if (sctx->gsvs_ring)
3913 si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE, sctx->gsvs_ring->width0 / 256);
3914 }
3915
3916 /* Set the state. */
3917 if (sctx->cs_preamble_gs_rings)
3918 si_pm4_free_state(sctx, sctx->cs_preamble_gs_rings, ~0);
3919 sctx->cs_preamble_gs_rings = pm4;
3920
3921 si_cs_preamble_add_vgt_flush(sctx);
3922
3923 /* Flush the context to re-emit both cs_preamble states. */
3924 sctx->initial_gfx_cs_size = 0; /* force flush */
3925 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
3926
3927 return true;
3928 }
3929
3930 static void si_shader_lock(struct si_shader *shader)
3931 {
3932 simple_mtx_lock(&shader->selector->mutex);
3933 if (shader->previous_stage_sel) {
3934 assert(shader->previous_stage_sel != shader->selector);
3935 simple_mtx_lock(&shader->previous_stage_sel->mutex);
3936 }
3937 }
3938
3939 static void si_shader_unlock(struct si_shader *shader)
3940 {
3941 if (shader->previous_stage_sel)
3942 simple_mtx_unlock(&shader->previous_stage_sel->mutex);
3943 simple_mtx_unlock(&shader->selector->mutex);
3944 }
3945
3946 /**
3947 * @returns 1 if \p shader has been updated to use a new scratch buffer
3948 * 0 if not
3949 * < 0 if there was a failure
3950 */
3951 static int si_update_scratch_buffer(struct si_context *sctx, struct si_shader *shader)
3952 {
3953 uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
3954
3955 if (!shader)
3956 return 0;
3957
3958 /* This shader doesn't need a scratch buffer */
3959 if (shader->config.scratch_bytes_per_wave == 0)
3960 return 0;
3961
3962 /* Prevent race conditions when updating:
3963 * - si_shader::scratch_bo
3964 * - si_shader::binary::code
3965 * - si_shader::previous_stage::binary::code.
3966 */
3967 si_shader_lock(shader);
3968
3969 /* This shader is already configured to use the current
3970 * scratch buffer. */
3971 if (shader->scratch_bo == sctx->scratch_buffer) {
3972 si_shader_unlock(shader);
3973 return 0;
3974 }
3975
3976 assert(sctx->scratch_buffer);
3977
3978 /* Replace the shader bo with a new bo that has the relocs applied. */
3979 if (!si_shader_binary_upload(sctx->screen, shader, scratch_va)) {
3980 si_shader_unlock(shader);
3981 return -1;
3982 }
3983
3984 /* Update the shader state to use the new shader bo. */
3985 si_shader_init_pm4_state(sctx->screen, shader);
3986
3987 si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
3988
3989 si_shader_unlock(shader);
3990 return 1;
3991 }
3992
3993 static struct si_shader *si_get_tcs_current(struct si_context *sctx)
3994 {
3995 if (!sctx->shader.tes.cso)
3996 return NULL; /* tessellation disabled */
3997
3998 return sctx->shader.tcs.cso ? sctx->shader.tcs.current : sctx->fixed_func_tcs_shader.current;
3999 }
4000
4001 static bool si_update_scratch_relocs(struct si_context *sctx)
4002 {
4003 struct si_shader *tcs = si_get_tcs_current(sctx);
4004 int r;
4005
4006 /* Update the shaders, so that they are using the latest scratch.
4007 * The scratch buffer may have been changed since these shaders were
4008 * last used, so we still need to try to update them, even if they
4009 * require scratch buffers smaller than the current size.
4010 */
4011 r = si_update_scratch_buffer(sctx, sctx->shader.ps.current);
4012 if (r < 0)
4013 return false;
4014 if (r == 1)
4015 si_pm4_bind_state(sctx, ps, sctx->shader.ps.current);
4016
4017 r = si_update_scratch_buffer(sctx, sctx->shader.gs.current);
4018 if (r < 0)
4019 return false;
4020 if (r == 1)
4021 si_pm4_bind_state(sctx, gs, sctx->shader.gs.current);
4022
4023 r = si_update_scratch_buffer(sctx, tcs);
4024 if (r < 0)
4025 return false;
4026 if (r == 1)
4027 si_pm4_bind_state(sctx, hs, tcs);
4028
4029 /* VS can be bound as LS, ES, VS, or NGG GS. */
4030 r = si_update_scratch_buffer(sctx, sctx->shader.vs.current);
4031 if (r < 0)
4032 return false;
4033 if (r == 1) {
4034 if (sctx->shader.vs.current->key.ge.as_ls)
4035 si_pm4_bind_state(sctx, ls, sctx->shader.vs.current);
4036 else if (sctx->shader.vs.current->key.ge.as_es)
4037 si_pm4_bind_state(sctx, es, sctx->shader.vs.current);
4038 else if (sctx->shader.vs.current->key.ge.as_ngg)
4039 si_pm4_bind_state(sctx, gs, sctx->shader.vs.current);
4040 else
4041 si_pm4_bind_state(sctx, vs, sctx->shader.vs.current);
4042 }
4043
4044 /* TES can be bound as ES, VS, or NGG GS. */
4045 r = si_update_scratch_buffer(sctx, sctx->shader.tes.current);
4046 if (r < 0)
4047 return false;
4048 if (r == 1) {
4049 if (sctx->shader.tes.current->key.ge.as_es)
4050 si_pm4_bind_state(sctx, es, sctx->shader.tes.current);
4051 else if (sctx->shader.tes.current->key.ge.as_ngg)
4052 si_pm4_bind_state(sctx, gs, sctx->shader.tes.current);
4053 else
4054 si_pm4_bind_state(sctx, vs, sctx->shader.tes.current);
4055 }
4056
4057 return true;
4058 }
4059
4060 bool si_update_spi_tmpring_size(struct si_context *sctx, unsigned bytes)
4061 {
4062 unsigned spi_tmpring_size;
4063 ac_get_scratch_tmpring_size(&sctx->screen->info, sctx->scratch_waves, bytes,
4064 &sctx->max_seen_scratch_bytes_per_wave, &spi_tmpring_size);
4065
4066 unsigned scratch_needed_size = sctx->max_seen_scratch_bytes_per_wave * sctx->scratch_waves;
4067
4068 if (scratch_needed_size > 0) {
4069 if (!sctx->scratch_buffer || scratch_needed_size > sctx->scratch_buffer->b.b.width0) {
4070 /* Create a bigger scratch buffer */
4071 si_resource_reference(&sctx->scratch_buffer, NULL);
4072
4073 sctx->scratch_buffer = si_aligned_buffer_create(
4074 &sctx->screen->b,
4075 PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
4076 PIPE_USAGE_DEFAULT, scratch_needed_size,
4077 sctx->screen->info.pte_fragment_size);
4078 if (!sctx->scratch_buffer)
4079 return false;
4080
4081 si_context_add_resource_size(sctx, &sctx->scratch_buffer->b.b);
4082 }
4083
4084 if (!si_update_scratch_relocs(sctx))
4085 return false;
4086 }
4087
4088 if (spi_tmpring_size != sctx->spi_tmpring_size) {
4089 sctx->spi_tmpring_size = spi_tmpring_size;
4090 si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
4091 }
4092 return true;
4093 }
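
/* Usage sketch (illustrative; the call site is simplified): draw and
 * dispatch paths pass the largest per-wave scratch requirement of the
 * bound shaders, e.g.
 *
 *    if (!si_update_spi_tmpring_size(sctx, shader->config.scratch_bytes_per_wave))
 *       return; // allocation failed; skip the draw
 *
 * The new SPI_TMPRING_SIZE value is emitted later by
 * si_emit_scratch_state() through the scratch_state atom.
 */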
4094
4095 void si_init_tess_factor_ring(struct si_context *sctx)
4096 {
4097 assert(!sctx->tess_rings);
4098 assert(((sctx->screen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
4099
4100 /* The address must be aligned to 2^19, because the shader only
4101 * receives the high 13 bits.
4102 */
4103 sctx->tess_rings = pipe_aligned_buffer_create(
4104 sctx->b.screen, SI_RESOURCE_FLAG_32BIT | SI_RESOURCE_FLAG_DRIVER_INTERNAL, PIPE_USAGE_DEFAULT,
4105 sctx->screen->tess_offchip_ring_size + sctx->screen->tess_factor_ring_size, 1 << 19);
4106 if (!sctx->tess_rings)
4107 return;
4108
4109 if (sctx->screen->info.has_tmz_support) {
4110 sctx->tess_rings_tmz = pipe_aligned_buffer_create(
4111 sctx->b.screen,
4112 PIPE_RESOURCE_FLAG_ENCRYPTED | SI_RESOURCE_FLAG_32BIT | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
4113 PIPE_USAGE_DEFAULT,
4114 sctx->screen->tess_offchip_ring_size + sctx->screen->tess_factor_ring_size, 1 << 19);
4115 }
4116
4117 uint64_t factor_va =
4118 si_resource(sctx->tess_rings)->gpu_address + sctx->screen->tess_offchip_ring_size;
4119
4120 if (sctx->shadowed_regs) {
4121 /* These registers will be shadowed, so set them only once. */
4122 /* TODO: tmz + shadowed_regs support */
4123 struct radeon_cmdbuf *cs = &sctx->gfx_cs;
4124
4125 assert(sctx->chip_class >= GFX7);
4126
4127 radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(sctx->tess_rings),
4128 RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
4129 si_emit_vgt_flush(cs);
4130
4131 /* Set tessellation registers. */
4132 radeon_begin(cs);
4133 radeon_set_uconfig_reg(R_030938_VGT_TF_RING_SIZE,
4134 S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
4135 radeon_set_uconfig_reg(R_030940_VGT_TF_MEMORY_BASE, factor_va >> 8);
4136 if (sctx->chip_class >= GFX10) {
4137 radeon_set_uconfig_reg(R_030984_VGT_TF_MEMORY_BASE_HI,
4138 S_030984_BASE_HI(factor_va >> 40));
4139 } else if (sctx->chip_class == GFX9) {
4140 radeon_set_uconfig_reg(R_030944_VGT_TF_MEMORY_BASE_HI,
4141 S_030944_BASE_HI(factor_va >> 40));
4142 }
4143 radeon_set_uconfig_reg(R_03093C_VGT_HS_OFFCHIP_PARAM,
4144 sctx->screen->vgt_hs_offchip_param);
4145 radeon_end();
4146 return;
4147 }
4148
4149 /* The codepath without register shadowing. */
4150 si_cs_preamble_add_vgt_flush(sctx);
4151
4152 /* Append these registers to the init config state. */
4153 if (sctx->chip_class >= GFX7) {
4154 si_pm4_set_reg(sctx->cs_preamble_state, R_030938_VGT_TF_RING_SIZE,
4155 S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
4156 si_pm4_set_reg(sctx->cs_preamble_state, R_030940_VGT_TF_MEMORY_BASE, factor_va >> 8);
4157 if (sctx->chip_class >= GFX10)
4158 si_pm4_set_reg(sctx->cs_preamble_state, R_030984_VGT_TF_MEMORY_BASE_HI,
4159 S_030984_BASE_HI(factor_va >> 40));
4160 else if (sctx->chip_class == GFX9)
4161 si_pm4_set_reg(sctx->cs_preamble_state, R_030944_VGT_TF_MEMORY_BASE_HI,
4162 S_030944_BASE_HI(factor_va >> 40));
4163 si_pm4_set_reg(sctx->cs_preamble_state, R_03093C_VGT_HS_OFFCHIP_PARAM,
4164 sctx->screen->vgt_hs_offchip_param);
4165 } else {
4166 struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
4167
4168 si_pm4_set_reg(pm4, R_008988_VGT_TF_RING_SIZE,
4169 S_008988_SIZE(sctx->screen->tess_factor_ring_size / 4));
4170 si_pm4_set_reg(pm4, R_0089B8_VGT_TF_MEMORY_BASE, factor_va >> 8);
4171 si_pm4_set_reg(pm4, R_0089B0_VGT_HS_OFFCHIP_PARAM,
4172 sctx->screen->vgt_hs_offchip_param);
4173 sctx->cs_preamble_tess_rings = pm4;
4174
4175 if (sctx->screen->info.has_tmz_support) {
4176 pm4 = CALLOC_STRUCT(si_pm4_state);
4177 uint64_t factor_va_tmz =
4178 si_resource(sctx->tess_rings_tmz)->gpu_address + sctx->screen->tess_offchip_ring_size;
4179 si_pm4_set_reg(pm4, R_008988_VGT_TF_RING_SIZE,
4180 S_008988_SIZE(sctx->screen->tess_factor_ring_size / 4));
4181 si_pm4_set_reg(pm4, R_0089B8_VGT_TF_MEMORY_BASE, factor_va_tmz >> 8);
4182 si_pm4_set_reg(pm4, R_0089B0_VGT_HS_OFFCHIP_PARAM,
4183 sctx->screen->vgt_hs_offchip_param);
4184 sctx->cs_preamble_tess_rings_tmz = pm4;
4185 }
4186 }
4187
4188 /* Flush the context to re-emit the cs_preamble state.
4189 * This is done only once in a lifetime of a context.
4190 */
4191 sctx->initial_gfx_cs_size = 0; /* force flush */
4192 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
4193 }
4194
4195 struct si_pm4_state *si_build_vgt_shader_config(struct si_screen *screen, union si_vgt_stages_key key)
4196 {
4197 struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
4198 uint32_t stages = 0;
4199
4200 if (key.u.tess) {
4201 stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
4202
4203 if (key.u.gs)
4204 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) | S_028B54_GS_EN(1);
4205 else if (key.u.ngg)
4206 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
4207 else
4208 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
4209 } else if (key.u.gs) {
4210 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) | S_028B54_GS_EN(1);
4211 } else if (key.u.ngg) {
4212 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
4213 }
4214
4215 if (key.u.ngg) {
4216 stages |= S_028B54_PRIMGEN_EN(1) |
4217 S_028B54_NGG_WAVE_ID_EN(key.u.streamout) |
4218 S_028B54_PRIMGEN_PASSTHRU_EN(key.u.ngg_passthrough) |
4219 S_028B54_PRIMGEN_PASSTHRU_NO_MSG(key.u.ngg_passthrough &&
4220 screen->info.family >= CHIP_DIMGREY_CAVEFISH);
4221 } else if (key.u.gs)
4222 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
4223
4224 if (screen->info.chip_class >= GFX9)
4225 stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
4226
4227 if (screen->info.chip_class >= GFX10) {
4228 stages |= S_028B54_HS_W32_EN(key.u.hs_wave32) |
4229 S_028B54_GS_W32_EN(key.u.gs_wave32) |
4230 S_028B54_VS_W32_EN(key.u.vs_wave32);
4231 /* Legacy GS only supports Wave64; the assert encodes the implication (legacy GS => !gs_wave32). */
4232 assert(!(key.u.gs && !key.u.ngg) || !key.u.gs_wave32);
4233 }
4234
4235 si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
4236 return pm4;
4237 }
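
/* Example (following the macros above): key.u.tess = 1, key.u.gs = 0,
 * key.u.ngg = 1 (an NGG TES) yields
 *
 *    stages = S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | S_028B54_HS_EN(1) |
 *             S_028B54_DYNAMIC_HS(1) |
 *             S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
 *             S_028B54_PRIMGEN_EN(1) | ...;
 *
 * i.e. the DS output feeds the primitive generator directly instead of
 * a legacy copy-shader VS stage.
 */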
4238
4239 static void si_emit_scratch_state(struct si_context *sctx)
4240 {
4241 struct radeon_cmdbuf *cs = &sctx->gfx_cs;
4242
4243 radeon_begin(cs);
4244 radeon_set_context_reg(R_0286E8_SPI_TMPRING_SIZE, sctx->spi_tmpring_size);
4245 radeon_end();
4246
4247 if (sctx->scratch_buffer) {
4248 radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->scratch_buffer,
4249 RADEON_USAGE_READWRITE | RADEON_PRIO_SCRATCH_BUFFER);
4250 }
4251 }
4252
4253 void si_init_screen_live_shader_cache(struct si_screen *sscreen)
4254 {
4255 util_live_shader_cache_init(&sscreen->live_shader_cache, si_create_shader_selector,
4256 si_destroy_shader_selector);
4257 }
4258
4259 void si_init_shader_functions(struct si_context *sctx)
4260 {
4261 sctx->atoms.s.scratch_state.emit = si_emit_scratch_state;
4262
4263 sctx->b.create_vs_state = si_create_shader;
4264 sctx->b.create_tcs_state = si_create_shader;
4265 sctx->b.create_tes_state = si_create_shader;
4266 sctx->b.create_gs_state = si_create_shader;
4267 sctx->b.create_fs_state = si_create_shader;
4268
4269 sctx->b.bind_vs_state = si_bind_vs_shader;
4270 sctx->b.bind_tcs_state = si_bind_tcs_shader;
4271 sctx->b.bind_tes_state = si_bind_tes_shader;
4272 sctx->b.bind_gs_state = si_bind_gs_shader;
4273 sctx->b.bind_fs_state = si_bind_ps_shader;
4274
4275 sctx->b.delete_vs_state = si_delete_shader_selector;
4276 sctx->b.delete_tcs_state = si_delete_shader_selector;
4277 sctx->b.delete_tes_state = si_delete_shader_selector;
4278 sctx->b.delete_gs_state = si_delete_shader_selector;
4279 sctx->b.delete_fs_state = si_delete_shader_selector;
4280 }
4281