/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ac_shader_util.h"
#include "ac_gpu_info.h"

#include "sid.h"
#include "u_math.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>

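/* Select the SPI_SHADER_Z_FORMAT export format for a pixel shader based on
 * which of Z, stencil and the sample mask it writes.
 */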
unsigned ac_get_spi_shader_z_format(bool writes_z, bool writes_stencil, bool writes_samplemask)
{
   if (writes_z) {
      /* Z needs 32 bits. */
      if (writes_samplemask)
         return V_028710_SPI_SHADER_32_ABGR;
      else if (writes_stencil)
         return V_028710_SPI_SHADER_32_GR;
      else
         return V_028710_SPI_SHADER_32_R;
   } else if (writes_stencil || writes_samplemask) {
      /* Both stencil and sample mask need only 16 bits. */
      return V_028710_SPI_SHADER_UINT16_ABGR;
   } else {
      return V_028710_SPI_SHADER_ZERO;
   }
}

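/* Compute the CB_SHADER_MASK matching a SPI_SHADER_COL_FORMAT value: for each
 * of the 8 color exports, set the channel-enable bits that the chosen export
 * format actually provides (e.g. 32_R enables R only, 32_AR enables R and A,
 * the *_ABGR formats enable all four channels).
 */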
unsigned ac_get_cb_shader_mask(unsigned spi_shader_col_format)
{
   unsigned i, cb_shader_mask = 0;

   for (i = 0; i < 8; i++) {
      switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
      case V_028714_SPI_SHADER_ZERO:
         break;
      case V_028714_SPI_SHADER_32_R:
         cb_shader_mask |= 0x1 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_GR:
         cb_shader_mask |= 0x3 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_AR:
         cb_shader_mask |= 0x9u << (i * 4);
         break;
      case V_028714_SPI_SHADER_FP16_ABGR:
      case V_028714_SPI_SHADER_UNORM16_ABGR:
      case V_028714_SPI_SHADER_SNORM16_ABGR:
      case V_028714_SPI_SHADER_UINT16_ABGR:
      case V_028714_SPI_SHADER_SINT16_ABGR:
      case V_028714_SPI_SHADER_32_ABGR:
         cb_shader_mask |= 0xfu << (i * 4);
         break;
      default:
         assert(0);
      }
   }
   return cb_shader_mask;
}

/**
 * Calculate the appropriate setting of VGT_GS_MODE for a geometry shader,
 * based on its maximum number of output vertices.
 */
uint32_t ac_vgt_gs_mode(unsigned gs_max_vert_out, enum chip_class chip_class)
{
   unsigned cut_mode;

   if (gs_max_vert_out <= 128) {
      cut_mode = V_028A40_GS_CUT_128;
   } else if (gs_max_vert_out <= 256) {
      cut_mode = V_028A40_GS_CUT_256;
   } else if (gs_max_vert_out <= 512) {
      cut_mode = V_028A40_GS_CUT_512;
   } else {
      assert(gs_max_vert_out <= 1024);
      cut_mode = V_028A40_GS_CUT_1024;
   }

   return S_028A40_MODE(V_028A40_GS_SCENARIO_G) | S_028A40_CUT_MODE(cut_mode) |
          S_028A40_ES_WRITE_OPTIMIZE(chip_class <= GFX8) | S_028A40_GS_WRITE_OPTIMIZE(1) |
          S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
}

/// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format
/// value for LLVM8+ tbuffer intrinsics.
unsigned ac_get_tbuffer_format(enum chip_class chip_class, unsigned dfmt, unsigned nfmt)
{
   // Some games try to access vertex buffers without a valid format.
   // This is a game bug, but we should still handle it gracefully.
   if (dfmt == V_008F0C_GFX10_FORMAT_INVALID)
      return V_008F0C_GFX10_FORMAT_INVALID;

   if (chip_class >= GFX10) {
      unsigned format;
      switch (dfmt) {
      default:
         unreachable("bad dfmt");
      case V_008F0C_BUF_DATA_FORMAT_INVALID:
         format = V_008F0C_GFX10_FORMAT_INVALID;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8:
         format = V_008F0C_GFX10_FORMAT_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16:
         format = V_008F0C_GFX10_FORMAT_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32:
         format = V_008F0C_GFX10_FORMAT_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
         format = V_008F0C_GFX10_FORMAT_2_10_10_10_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         format = V_008F0C_GFX10_FORMAT_10_11_11_UINT;
         break;
      }

      // Use the regularity properties of the combined format enum.
      //
      // Note: float is incompatible with 8-bit data formats,
      //       [us]{norm,scaled} are incompatible with 32-bit data formats.
      //       [us]scaled are not writable.
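      //       Within each data format group, the GFX10 format enum is laid
      //       out in the order UNORM, SNORM, USCALED, SSCALED, UINT, SINT,
      //       FLOAT, which is why a fixed offset from the *_UINT value chosen
      //       above yields the right combined format.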
      switch (nfmt) {
      case V_008F0C_BUF_NUM_FORMAT_UNORM:
         format -= 4;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SNORM:
         format -= 3;
         break;
      case V_008F0C_BUF_NUM_FORMAT_USCALED:
         format -= 2;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SSCALED:
         format -= 1;
         break;
      default:
         unreachable("bad nfmt");
      case V_008F0C_BUF_NUM_FORMAT_UINT:
         break;
      case V_008F0C_BUF_NUM_FORMAT_SINT:
         format += 1;
         break;
      case V_008F0C_BUF_NUM_FORMAT_FLOAT:
         format += 2;
         break;
      }

      return format;
   } else {
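      /* Before GFX10, dfmt and nfmt are separate fields: pack dfmt into the
       * low 4 bits and nfmt into the next 4, as the tbuffer intrinsics expect.
       */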
      return dfmt | (nfmt << 4);
   }
}

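/* Properties of each vertex buffer data format. Entries are
 * { element size in bytes, channel count, bytes per channel (0 for packed
 *   formats), single-channel dfmt }, matching the field order of
 * struct ac_data_format_info.
 */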
static const struct ac_data_format_info data_format_table[] = {
   [V_008F0C_BUF_DATA_FORMAT_INVALID] = {0, 4, 0, V_008F0C_BUF_DATA_FORMAT_INVALID},
   [V_008F0C_BUF_DATA_FORMAT_8] = {1, 1, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_16] = {2, 1, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_8_8] = {2, 2, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_32] = {4, 1, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_16_16] = {4, 2, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_10_11_11] = {4, 3, 0, V_008F0C_BUF_DATA_FORMAT_10_11_11},
   [V_008F0C_BUF_DATA_FORMAT_11_11_10] = {4, 3, 0, V_008F0C_BUF_DATA_FORMAT_11_11_10},
   [V_008F0C_BUF_DATA_FORMAT_10_10_10_2] = {4, 4, 0, V_008F0C_BUF_DATA_FORMAT_10_10_10_2},
   [V_008F0C_BUF_DATA_FORMAT_2_10_10_10] = {4, 4, 0, V_008F0C_BUF_DATA_FORMAT_2_10_10_10},
   [V_008F0C_BUF_DATA_FORMAT_8_8_8_8] = {4, 4, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_32_32] = {8, 2, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_16_16_16_16] = {8, 4, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_32_32_32] = {12, 3, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_32_32_32_32] = {16, 4, 4, V_008F0C_BUF_DATA_FORMAT_32},
};

const struct ac_data_format_info *ac_get_data_format_info(unsigned dfmt)
{
   assert(dfmt < ARRAY_SIZE(data_format_table));
   return &data_format_table[dfmt];
}

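/* Map a GLSL sampler dimension to the image instruction dimension used when
 * sampling on the given chip.
 */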
enum ac_image_dim ac_get_sampler_dim(enum chip_class chip_class, enum glsl_sampler_dim dim,
                                     bool is_array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_1D:
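      /* GFX9 uses 2D opcodes for 1D textures; this has to match how the
       * driver sets up the image descriptor on GFX9.
       */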
      if (chip_class == GFX9)
         return is_array ? ac_image_2darray : ac_image_2d;
      return is_array ? ac_image_1darray : ac_image_1d;
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return is_array ? ac_image_2darray : ac_image_2d;
   case GLSL_SAMPLER_DIM_3D:
      return ac_image_3d;
   case GLSL_SAMPLER_DIM_CUBE:
      return ac_image_cube;
   case GLSL_SAMPLER_DIM_MS:
      return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
   case GLSL_SAMPLER_DIM_SUBPASS:
      return ac_image_2darray;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return ac_image_2darraymsaa;
   default:
      unreachable("bad sampler dim");
   }
}

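/* Like ac_get_sampler_dim, but for image load/store/atomic instructions, where
 * the dimension must match the resource type programmed in the descriptor.
 */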
enum ac_image_dim ac_get_image_dim(enum chip_class chip_class, enum glsl_sampler_dim sdim,
                                   bool is_array)
{
   enum ac_image_dim dim = ac_get_sampler_dim(chip_class, sdim, is_array);

   /* Match the resource type set in the descriptor. */
   if (dim == ac_image_cube || (chip_class <= GFX8 && dim == ac_image_3d))
      dim = ac_image_2darray;
   else if (sdim == GLSL_SAMPLER_DIM_2D && !is_array && chip_class == GFX9) {
      /* When a single layer of a 3D texture is bound, the shader
       * will refer to a 2D target, but the descriptor has a 3D type.
       * Since the HW ignores BASE_ARRAY in this case, we need to
       * send 3 coordinates. This doesn't hurt when the underlying
       * texture is non-3D.
       */
      dim = ac_image_3d;
   }

   return dim;
}

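/* Count the pixel shader input VGPRs enabled in SPI_PS_INPUT_ADDR, and
 * optionally return the VGPR indices of the front-face and ancillary inputs
 * (-1 if they are not enabled).
 */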
unsigned ac_get_fs_input_vgpr_cnt(const struct ac_shader_config *config,
                                  signed char *face_vgpr_index_ptr,
                                  signed char *ancillary_vgpr_index_ptr)
{
   unsigned num_input_vgprs = 0;
   signed char face_vgpr_index = -1;
   signed char ancillary_vgpr_index = -1;

   if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 3;
   if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr)) {
      face_vgpr_index = num_input_vgprs;
      num_input_vgprs += 1;
   }
   if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr)) {
      ancillary_vgpr_index = num_input_vgprs;
      num_input_vgprs += 1;
   }
   if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;

   if (face_vgpr_index_ptr)
      *face_vgpr_index_ptr = face_vgpr_index;
   if (ancillary_vgpr_index_ptr)
      *ancillary_vgpr_index_ptr = ancillary_vgpr_index;

   return num_input_vgprs;
}

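/* Choose the SPI color export formats for a color buffer format and number
 * type, returning four variants: the fastest one, one that keeps alpha, one
 * that is safe for blending, and one that does both (see the comments on the
 * local variables below).
 */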
void ac_choose_spi_color_formats(unsigned format, unsigned swap, unsigned ntype,
                                 bool is_depth, bool use_rbplus,
                                 struct ac_spi_color_formats *formats)
{
   /* Alpha is needed for alpha-to-coverage.
    * Blending may be with or without alpha.
    */
   unsigned normal = 0;      /* most optimal, may not support blending or export alpha */
   unsigned alpha = 0;       /* exports alpha, but may not support blending */
   unsigned blend = 0;       /* supports blending, but may not export alpha */
   unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

   /* Choose the SPI color formats. These are required values for RB+.
    * Other chips have multiple choices, though they are not necessarily better.
    */
   switch (format) {
   case V_028C70_COLOR_5_6_5:
   case V_028C70_COLOR_1_5_5_5:
   case V_028C70_COLOR_5_5_5_1:
   case V_028C70_COLOR_4_4_4_4:
   case V_028C70_COLOR_10_11_11:
   case V_028C70_COLOR_11_11_10:
   case V_028C70_COLOR_5_9_9_9:
   case V_028C70_COLOR_8:
   case V_028C70_COLOR_8_8:
   case V_028C70_COLOR_8_8_8_8:
   case V_028C70_COLOR_10_10_10_2:
   case V_028C70_COLOR_2_10_10_10:
      if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;

      if (!use_rbplus && format == V_028C70_COLOR_8 &&
          ntype != V_028C70_NUMBER_SRGB && swap == V_028C70_SWAP_STD) /* R */ {
         /* When RB+ is enabled, R8_UNORM should use FP16_ABGR for 2x
          * exporting performance. Otherwise, use 32_R to remove useless
          * instructions needed for 16-bit compressed exports.
          */
         blend = normal = V_028714_SPI_SHADER_32_R;
      }
      break;

   case V_028C70_COLOR_16:
   case V_028C70_COLOR_16_16:
   case V_028C70_COLOR_16_16_16_16:
      if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) {
         /* UNORM16 and SNORM16 don't support blending */
         if (ntype == V_028C70_NUMBER_UNORM)
            normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
         else
            normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

         /* Use 32 bits per channel for blending. */
         if (format == V_028C70_COLOR_16) {
            if (swap == V_028C70_SWAP_STD) { /* R */
               blend = V_028714_SPI_SHADER_32_R;
               blend_alpha = V_028714_SPI_SHADER_32_AR;
            } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else if (format == V_028C70_COLOR_16_16) {
            if (swap == V_028C70_SWAP_STD) { /* RG */
               blend = V_028714_SPI_SHADER_32_GR;
               blend_alpha = V_028714_SPI_SHADER_32_ABGR;
            } else if (swap == V_028C70_SWAP_ALT) /* RA */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else /* 16_16_16_16 */
            blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_FLOAT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32:
      if (swap == V_028C70_SWAP_STD) { /* R */
         blend = normal = V_028714_SPI_SHADER_32_R;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
      } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32:
      if (swap == V_028C70_SWAP_STD) { /* RG */
         blend = normal = V_028714_SPI_SHADER_32_GR;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (swap == V_028C70_SWAP_ALT) /* RA */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32_32_32:
   case V_028C70_COLOR_8_24:
   case V_028C70_COLOR_24_8:
   case V_028C70_COLOR_X24_8_32_FLOAT:
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
      break;

   default:
      assert(0);
      return;
   }

   /* The DB->CB copy needs 32_ABGR. */
   if (is_depth)
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;

   formats->normal = normal;
   formats->alpha = alpha;
   formats->blend = blend;
   formats->blend_alpha = blend_alpha;
}

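/* Compute the late-alloc wave limit (in wave64 units, per SA) and the CU mask
 * for the last VGT stage (HW VS, or the GS stage when NGG is used), applying
 * the known workarounds for deadlocks and hangs.
 */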
void ac_compute_late_alloc(const struct radeon_info *info, bool ngg, bool ngg_culling,
                           bool uses_scratch, unsigned *late_alloc_wave64, unsigned *cu_mask)
{
   *late_alloc_wave64 = 0; /* The limit is per SA. */
   *cu_mask = 0xffff;

   /* CU masking can decrease performance and cause a hang with <= 2 CUs per SA. */
   if (info->min_good_cu_per_sa <= 2)
      return;

   /* If scratch is used with late alloc, the GPU could deadlock if PS uses scratch too. A more
    * complicated computation is needed to enable late alloc with scratch (see PAL).
    */
   if (uses_scratch)
      return;

   /* Late alloc is not used for NGG on Navi14 due to a hw bug. */
   if (ngg && info->family == CHIP_NAVI14)
      return;

   if (info->chip_class >= GFX10) {
      /* For Wave32, the hw will launch twice the number of late alloc waves, so 1 == 2x wave32.
       * These limits are estimated because they are all safe but they vary in performance.
       */
      if (ngg_culling)
         *late_alloc_wave64 = info->min_good_cu_per_sa * 10;
      else
         *late_alloc_wave64 = info->min_good_cu_per_sa * 4;

      /* Limit LATE_ALLOC_GS to prevent a hang (hw bug) on gfx10. */
      if (info->chip_class == GFX10 && ngg)
         *late_alloc_wave64 = MIN2(*late_alloc_wave64, 64);

      /* Gfx10: CU2 & CU3 must be disabled to prevent a hw deadlock.
       * Others: CU1 must be disabled to prevent a hw deadlock.
       *
       * The deadlock is caused by late alloc, which usually increases performance.
       */
      *cu_mask &= info->chip_class == GFX10 ? ~BITFIELD_RANGE(2, 2) :
                                              ~BITFIELD_RANGE(1, 1);
   } else {
      if (info->min_good_cu_per_sa <= 4) {
         /* Too few available compute units per SA. Disallowing VS to run on one CU could hurt us
          * more than late VS allocation would help.
          *
          * 2 is the highest safe number that allows us to keep all CUs enabled.
          */
         *late_alloc_wave64 = 2;
      } else {
         /* This is a good initial value, allowing 1 late_alloc wave per SIMD on num_cu - 2.
          */
         *late_alloc_wave64 = (info->min_good_cu_per_sa - 2) * 4;
      }

      /* VS can't execute on one CU if the limit is > 2. */
      if (*late_alloc_wave64 > 2)
         *cu_mask = 0xfffe; /* 1 CU disabled */
   }

   /* Max number that fits into the register field. */
   if (ngg) /* GS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(~0u));
   else /* VS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B11C_LIMIT(~0u));
}

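/* Return the number of invocations in a compute workgroup; for variable
 * workgroup sizes, return the maximum supported size.
 */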
unsigned ac_compute_cs_workgroup_size(uint16_t sizes[3], bool variable, unsigned max)
{
   if (variable)
      return max;

   return sizes[0] * sizes[1] * sizes[2];
}

unsigned ac_compute_lshs_workgroup_size(enum chip_class chip_class, gl_shader_stage stage,
                                        unsigned tess_num_patches,
                                        unsigned tess_patch_in_vtx,
                                        unsigned tess_patch_out_vtx)
{
   /* When tessellation is used, API VS runs on HW LS, API TCS runs on HW HS.
    * These two HW stages are merged on GFX9+.
    */

   bool merged_shaders = chip_class >= GFX9;
   unsigned ls_workgroup_size = tess_num_patches * tess_patch_in_vtx;
   unsigned hs_workgroup_size = tess_num_patches * tess_patch_out_vtx;

   if (merged_shaders)
      return MAX2(ls_workgroup_size, hs_workgroup_size);
   else if (stage == MESA_SHADER_VERTEX)
      return ls_workgroup_size;
   else if (stage == MESA_SHADER_TESS_CTRL)
      return hs_workgroup_size;
   else
      unreachable("invalid LSHS shader stage");
}

unsigned ac_compute_esgs_workgroup_size(enum chip_class chip_class, unsigned wave_size,
                                        unsigned es_verts, unsigned gs_inst_prims)
{
   /* ESGS may operate in workgroups if on-chip GS (LDS rings) are enabled.
    *
    * GFX6: Not possible in the HW.
    * GFX7-8 (unmerged): possible in the HW, but not implemented in Mesa.
    * GFX9+ (merged): implemented in Mesa.
    */

   if (chip_class <= GFX8)
      return wave_size;

   unsigned workgroup_size = MAX2(es_verts, gs_inst_prims);
   return CLAMP(workgroup_size, 1, 256);
}

unsigned ac_compute_ngg_workgroup_size(unsigned es_verts, unsigned gs_inst_prims,
                                       unsigned max_vtx_out, unsigned prim_amp_factor)
{
   /* NGG always operates in workgroups.
    *
    * For API VS/TES/GS:
    * - 1 invocation per input vertex
    * - 1 invocation per input primitive
    *
    * The same invocation can process both an input vertex and primitive,
    * however 1 invocation can only output up to 1 vertex and 1 primitive.
    */

   unsigned max_vtx_in = es_verts < 256 ? es_verts : 3 * gs_inst_prims;
   unsigned max_prim_in = gs_inst_prims;
   unsigned max_prim_out = gs_inst_prims * prim_amp_factor;
   unsigned workgroup_size = MAX4(max_vtx_in, max_vtx_out, max_prim_in, max_prim_out);

   return CLAMP(workgroup_size, 1, 256);
}