1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * on the rights to use, copy, modify, merge, publish, distribute, sub
9  * license, and/or sell copies of the Software, and to permit persons to whom
10  * the Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 /* Resource binding slots and sampler states (each described with 8 or
26  * 4 dwords) are stored in lists in memory which is accessed by shaders
27  * using scalar load instructions.
28  *
29  * This file is responsible for managing such lists. It keeps a copy of all
30  * descriptors in CPU memory and re-uploads a whole list if some slots have
31  * been changed.
32  *
33  * This code is also responsible for updating shader pointers to those lists.
34  *
35  * Note that CP DMA can't be used for updating the lists, because a GPU hang
36  * could leave the list in a mid-IB state and the next IB would get wrong
37  * descriptors and the whole context would be unusable at that point.
38  * (Note: The register shadowing can't be used due to the same reason)
39  *
40  * Also, uploading descriptors to newly allocated memory doesn't require
41  * a KCACHE flush.
42  *
43  *
44  * Possible scenarios for one 16 dword image+sampler slot:
45  *
46  *       | Image        | w/ FMASK   | Buffer       | NULL
47  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
48  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
49  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
50  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
51  *
52  * FMASK implies MSAA, therefore no sampler state.
53  * Sampler states are never unbound except when FMASK is bound.
54  */
55 
56 #include "si_pipe.h"
57 #include "si_compute.h"
58 #include "si_build_pm4.h"
59 #include "sid.h"
60 #include "util/format/u_format.h"
61 #include "util/hash_table.h"
62 #include "util/u_idalloc.h"
63 #include "util/u_memory.h"
64 #include "util/u_upload_mgr.h"
65 
66 /* NULL image and buffer descriptor for textures (alpha = 1) and images
67  * (alpha = 0).
68  *
69  * For images, all fields must be zero except for the swizzle, which
70  * supports arbitrary combinations of 0s and 1s. The texture type must be
71  * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
72  *
73  * For buffers, all fields must be zero. If they are not, the hw hangs.
74  *
75  * This is the only reason why the buffer descriptor must be in words [4:7].
76  */
77 static uint32_t null_texture_descriptor[8] = {
78    0, 0, 0, S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) | S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
79    /* the rest must contain zeros, which is also used by the buffer
80     * descriptor */
81 };
82 
83 static uint32_t null_image_descriptor[8] = {
84    0, 0, 0, S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
85    /* the rest must contain zeros, which is also used by the buffer
86     * descriptor */
87 };
88 
si_desc_extract_buffer_address(const uint32_t * desc)89 static uint64_t si_desc_extract_buffer_address(const uint32_t *desc)
90 {
91    uint64_t va = desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
92 
93    /* Sign-extend the 48-bit address. */
94    va <<= 16;
95    va = (int64_t)va >> 16;
96    return va;
97 }
98 
si_init_descriptor_list(uint32_t * desc_list,unsigned element_dw_size,unsigned num_elements,const uint32_t * null_descriptor)99 static void si_init_descriptor_list(uint32_t *desc_list, unsigned element_dw_size,
100                                     unsigned num_elements, const uint32_t *null_descriptor)
101 {
102    int i;
103 
104    /* Initialize the array to NULL descriptors if the element size is 8. */
105    if (null_descriptor) {
106       assert(element_dw_size % 8 == 0);
107       for (i = 0; i < num_elements * element_dw_size / 8; i++)
108          memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
109    }
110 }
111 
si_init_descriptors(struct si_descriptors * desc,short shader_userdata_rel_index,unsigned element_dw_size,unsigned num_elements)112 static void si_init_descriptors(struct si_descriptors *desc, short shader_userdata_rel_index,
113                                 unsigned element_dw_size, unsigned num_elements)
114 {
115    desc->list = CALLOC(num_elements, element_dw_size * 4);
116    desc->element_dw_size = element_dw_size;
117    desc->num_elements = num_elements;
118    desc->shader_userdata_offset = shader_userdata_rel_index * 4;
119    desc->slot_index_to_bind_directly = -1;
120 }
121 
si_release_descriptors(struct si_descriptors * desc)122 static void si_release_descriptors(struct si_descriptors *desc)
123 {
124    si_resource_reference(&desc->buffer, NULL);
125    FREE(desc->list);
126 }
127 
si_upload_descriptors(struct si_context * sctx,struct si_descriptors * desc)128 static bool si_upload_descriptors(struct si_context *sctx, struct si_descriptors *desc)
129 {
130    unsigned slot_size = desc->element_dw_size * 4;
131    unsigned first_slot_offset = desc->first_active_slot * slot_size;
132    unsigned upload_size = desc->num_active_slots * slot_size;
133 
134    /* Skip the upload if no shader is using the descriptors. dirty_mask
135     * will stay dirty and the descriptors will be uploaded when there is
136     * a shader using them.
137     */
138    if (!upload_size)
139       return true;
140 
141    /* If there is just one active descriptor, bind it directly. */
142    if ((int)desc->first_active_slot == desc->slot_index_to_bind_directly &&
143        desc->num_active_slots == 1) {
144       uint32_t *descriptor = &desc->list[desc->slot_index_to_bind_directly * desc->element_dw_size];
145 
146       /* The buffer is already in the buffer list. */
147       si_resource_reference(&desc->buffer, NULL);
148       desc->gpu_list = NULL;
149       desc->gpu_address = si_desc_extract_buffer_address(descriptor);
150       return true;
151    }
152 
153    uint32_t *ptr;
154    unsigned buffer_offset;
155    u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size,
156                   si_optimal_tcc_alignment(sctx, upload_size), &buffer_offset,
157                   (struct pipe_resource **)&desc->buffer, (void **)&ptr);
158    if (!desc->buffer) {
159       desc->gpu_address = 0;
160       return false; /* skip the draw call */
161    }
162 
163    util_memcpy_cpu_to_le32(ptr, (char *)desc->list + first_slot_offset, upload_size);
164    desc->gpu_list = ptr - first_slot_offset / 4;
165 
166    radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, desc->buffer,
167                              RADEON_USAGE_READ | RADEON_PRIO_DESCRIPTORS);
168 
169    /* The shader pointer should point to slot 0. */
170    buffer_offset -= first_slot_offset;
171    desc->gpu_address = desc->buffer->gpu_address + buffer_offset;
172 
173    assert(desc->buffer->flags & RADEON_FLAG_32BIT);
174    assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
175    assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);
176    return true;
177 }
178 
179 static void
si_add_descriptors_to_bo_list(struct si_context * sctx,struct si_descriptors * desc)180 si_add_descriptors_to_bo_list(struct si_context *sctx, struct si_descriptors *desc)
181 {
182    if (!desc->buffer)
183       return;
184 
185    radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, desc->buffer,
186                              RADEON_USAGE_READ | RADEON_PRIO_DESCRIPTORS);
187 }
188 
189 /* SAMPLER VIEWS */
190 
si_get_sampler_view_priority(struct si_resource * res)191 static inline unsigned si_get_sampler_view_priority(struct si_resource *res)
192 {
193    if (res->b.b.target == PIPE_BUFFER)
194       return RADEON_PRIO_SAMPLER_BUFFER;
195 
196    if (res->b.b.nr_samples > 1)
197       return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;
198 
199    return RADEON_PRIO_SAMPLER_TEXTURE;
200 }
201 
si_sampler_and_image_descriptors(struct si_context * sctx,unsigned shader)202 static struct si_descriptors *si_sampler_and_image_descriptors(struct si_context *sctx,
203                                                                unsigned shader)
204 {
205    return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
206 }
207 
si_release_sampler_views(struct si_samplers * samplers)208 static void si_release_sampler_views(struct si_samplers *samplers)
209 {
210    int i;
211 
212    for (i = 0; i < ARRAY_SIZE(samplers->views); i++) {
213       pipe_sampler_view_reference(&samplers->views[i], NULL);
214    }
215 }
216 
si_sampler_view_add_buffer(struct si_context * sctx,struct pipe_resource * resource,unsigned usage,bool is_stencil_sampler,bool check_mem)217 static void si_sampler_view_add_buffer(struct si_context *sctx, struct pipe_resource *resource,
218                                        unsigned usage, bool is_stencil_sampler,
219                                        bool check_mem)
220 {
221    struct si_texture *tex = (struct si_texture *)resource;
222    unsigned priority;
223 
224    if (!resource)
225       return;
226 
227    /* Use the flushed depth texture if direct sampling is unsupported. */
228    if (resource->target != PIPE_BUFFER && tex->is_depth &&
229        !si_can_sample_zs(tex, is_stencil_sampler))
230       tex = tex->flushed_depth_texture;
231 
232    priority = si_get_sampler_view_priority(&tex->buffer);
233    radeon_add_to_gfx_buffer_list_check_mem(sctx, &tex->buffer, usage | priority, check_mem);
234 }
235 
si_sampler_views_begin_new_cs(struct si_context * sctx,struct si_samplers * samplers)236 static void si_sampler_views_begin_new_cs(struct si_context *sctx, struct si_samplers *samplers)
237 {
238    unsigned mask = samplers->enabled_mask;
239 
240    /* Add buffers to the CS. */
241    while (mask) {
242       int i = u_bit_scan(&mask);
243       struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];
244 
245       si_sampler_view_add_buffer(sctx, sview->base.texture, RADEON_USAGE_READ,
246                                  sview->is_stencil_sampler, false);
247    }
248 }
249 
si_sampler_views_check_encrypted(struct si_context * sctx,struct si_samplers * samplers,unsigned samplers_declared)250 static bool si_sampler_views_check_encrypted(struct si_context *sctx, struct si_samplers *samplers,
251                                              unsigned samplers_declared)
252 {
253    unsigned mask = samplers->enabled_mask & samplers_declared;
254 
255    /* Verify if a samplers uses an encrypted resource */
256    while (mask) {
257       int i = u_bit_scan(&mask);
258       struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];
259 
260       struct si_resource *res = si_resource(sview->base.texture);
261       if (res->flags & RADEON_FLAG_ENCRYPTED)
262          return true;
263    }
264    return false;
265 }
266 
267 /* Set buffer descriptor fields that can be changed by reallocations. */
si_set_buf_desc_address(struct si_resource * buf,uint64_t offset,uint32_t * state)268 static void si_set_buf_desc_address(struct si_resource *buf, uint64_t offset, uint32_t *state)
269 {
270    uint64_t va = buf->gpu_address + offset;
271 
272    state[0] = va;
273    state[1] &= C_008F04_BASE_ADDRESS_HI;
274    state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
275 }
276 
277 /* Set texture descriptor fields that can be changed by reallocations.
278  *
279  * \param tex			texture
280  * \param base_level_info	information of the level of BASE_ADDRESS
281  * \param base_level		the level of BASE_ADDRESS
282  * \param first_level		pipe_sampler_view.u.tex.first_level
283  * \param block_width		util_format_get_blockwidth()
284  * \param is_stencil		select between separate Z & Stencil
285  * \param state			descriptor to update
286  */
si_set_mutable_tex_desc_fields(struct si_screen * sscreen,struct si_texture * tex,const struct legacy_surf_level * base_level_info,unsigned base_level,unsigned first_level,unsigned block_width,bool is_stencil,uint16_t access,uint32_t * restrict state)287 void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
288                                     const struct legacy_surf_level *base_level_info,
289                                     unsigned base_level, unsigned first_level, unsigned block_width,
290                                     /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
291                                     bool is_stencil, uint16_t access, uint32_t * restrict state)
292 {
293    uint64_t va, meta_va = 0;
294 
295    if (tex->is_depth && !si_can_sample_zs(tex, is_stencil)) {
296       tex = tex->flushed_depth_texture;
297       is_stencil = false;
298    }
299 
300    va = tex->buffer.gpu_address;
301 
302    if (sscreen->info.chip_class >= GFX9) {
303       /* Only stencil_offset needs to be added here. */
304       if (is_stencil)
305          va += tex->surface.u.gfx9.zs.stencil_offset;
306       else
307          va += tex->surface.u.gfx9.surf_offset;
308    } else {
309       va += (uint64_t)base_level_info->offset_256B * 256;
310    }
311 
312    state[0] = va >> 8;
313    state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
314 
315    /* Only macrotiled modes can set tile swizzle.
316     * GFX9 doesn't use (legacy) base_level_info.
317     */
318    if (sscreen->info.chip_class >= GFX9 || base_level_info->mode == RADEON_SURF_MODE_2D)
319       state[0] |= tex->surface.tile_swizzle;
320 
321    if (sscreen->info.chip_class >= GFX8) {
322       if (!(access & SI_IMAGE_ACCESS_DCC_OFF) && vi_dcc_enabled(tex, first_level)) {
323          meta_va = tex->buffer.gpu_address + tex->surface.meta_offset;
324 
325          if (sscreen->info.chip_class == GFX8) {
326             meta_va += tex->surface.u.legacy.color.dcc_level[base_level].dcc_offset;
327             assert(base_level_info->mode == RADEON_SURF_MODE_2D);
328          }
329 
330          unsigned dcc_tile_swizzle = tex->surface.tile_swizzle << 8;
331          dcc_tile_swizzle &= (1 << tex->surface.meta_alignment_log2) - 1;
332          meta_va |= dcc_tile_swizzle;
333       } else if (vi_tc_compat_htile_enabled(tex, first_level,
334                                             is_stencil ? PIPE_MASK_S : PIPE_MASK_Z)) {
335          meta_va = tex->buffer.gpu_address + tex->surface.meta_offset;
336       }
337 
338       if (meta_va)
339          state[6] |= S_008F28_COMPRESSION_EN(1);
340    }
341 
342    if (sscreen->info.chip_class >= GFX8 && sscreen->info.chip_class <= GFX9)
343       state[7] = meta_va >> 8;
344 
345    if (sscreen->info.chip_class >= GFX10) {
346       if (is_stencil) {
347          state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.zs.stencil_swizzle_mode);
348       } else {
349          state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.swizzle_mode);
350       }
351 
352       if (meta_va) {
353          struct gfx9_surf_meta_flags meta = {
354             .rb_aligned = 1,
355             .pipe_aligned = 1,
356          };
357 
358          if (!tex->is_depth && tex->surface.meta_offset)
359             meta = tex->surface.u.gfx9.color.dcc;
360 
361          state[6] |= S_00A018_META_PIPE_ALIGNED(meta.pipe_aligned) |
362                      S_00A018_META_DATA_ADDRESS_LO(meta_va >> 8) |
363                      /* DCC image stores require the following settings:
364                       * - INDEPENDENT_64B_BLOCKS = 0
365                       * - INDEPENDENT_128B_BLOCKS = 1
366                       * - MAX_COMPRESSED_BLOCK_SIZE = 128B
367                       * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
368                       *
369                       * The same limitations apply to SDMA compressed stores because
370                       * SDMA uses the same DCC codec.
371                       */
372                      S_00A018_WRITE_COMPRESS_ENABLE(ac_surface_supports_dcc_image_stores(sscreen->info.chip_class, &tex->surface) &&
373                                                     (access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE));
374 
375          /* TC-compatible MSAA HTILE requires ITERATE_256. */
376          if (tex->is_depth && tex->buffer.b.b.nr_samples >= 2)
377             state[6] |= S_00A018_ITERATE_256(1);
378       }
379 
380       state[7] = meta_va >> 16;
381    } else if (sscreen->info.chip_class == GFX9) {
382       if (is_stencil) {
383          state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.zs.stencil_swizzle_mode);
384          state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.zs.stencil_epitch);
385       } else {
386          uint16_t epitch = tex->surface.u.gfx9.epitch;
387          if (tex->buffer.b.b.format == PIPE_FORMAT_R8G8_R8B8_UNORM &&
388              block_width == 1) {
389             /* epitch is patched in ac_surface for sdma/vcn blocks to get
390              * a value expressed in elements unit.
391              * But here the texture is used with block_width == 1 so we
392              * need epitch in pixel units.
393              */
394             epitch = (epitch + 1) / tex->surface.blk_w - 1;
395          }
396          state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.swizzle_mode);
397          state[4] |= S_008F20_PITCH(epitch);
398       }
399 
400       state[5] &=
401          C_008F24_META_DATA_ADDRESS & C_008F24_META_PIPE_ALIGNED & C_008F24_META_RB_ALIGNED;
402       if (meta_va) {
403          struct gfx9_surf_meta_flags meta = {
404             .rb_aligned = 1,
405             .pipe_aligned = 1,
406          };
407 
408          if (!tex->is_depth && tex->surface.meta_offset)
409             meta = tex->surface.u.gfx9.color.dcc;
410 
411          state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
412                      S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
413                      S_008F24_META_RB_ALIGNED(meta.rb_aligned);
414       }
415    } else {
416       /* GFX6-GFX8 */
417       unsigned pitch = base_level_info->nblk_x * block_width;
418       unsigned index = si_tile_mode_index(tex, base_level, is_stencil);
419 
420       state[3] |= S_008F1C_TILING_INDEX(index);
421       state[4] |= S_008F20_PITCH(pitch - 1);
422    }
423 
424    if (tex->swap_rgb_to_bgr) {
425       unsigned swizzle_x = G_008F1C_DST_SEL_X(state[3]);
426       unsigned swizzle_z = G_008F1C_DST_SEL_Z(state[3]);
427 
428       state[3] &= C_008F1C_DST_SEL_X;
429       state[3] |= S_008F1C_DST_SEL_X(swizzle_z);
430       state[3] &= C_008F1C_DST_SEL_Z;
431       state[3] |= S_008F1C_DST_SEL_Z(swizzle_x);
432    }
433 }
434 
si_set_sampler_state_desc(struct si_sampler_state * sstate,struct si_sampler_view * sview,struct si_texture * tex,uint32_t * desc)435 static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
436                                       struct si_sampler_view *sview, struct si_texture *tex,
437                                       uint32_t *desc)
438 {
439    if (tex && tex->upgraded_depth && sview && !sview->is_stencil_sampler)
440       memcpy(desc, sstate->upgraded_depth_val, 4 * 4);
441    else
442       memcpy(desc, sstate->val, 4 * 4);
443 }
444 
si_set_sampler_view_desc(struct si_context * sctx,struct si_sampler_view * sview,struct si_sampler_state * sstate,uint32_t * restrict desc)445 static void si_set_sampler_view_desc(struct si_context *sctx, struct si_sampler_view *sview,
446                                      struct si_sampler_state *sstate,
447                                      /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
448                                      uint32_t * restrict desc)
449 {
450    struct pipe_sampler_view *view = &sview->base;
451    struct si_texture *tex = (struct si_texture *)view->texture;
452 
453    assert(tex); /* views with texture == NULL aren't supported */
454 
455    if (tex->buffer.b.b.target == PIPE_BUFFER) {
456       memcpy(desc, sview->state, 8 * 4);
457       memcpy(desc + 8, null_texture_descriptor, 4 * 4); /* Disable FMASK. */
458       si_set_buf_desc_address(&tex->buffer, sview->base.u.buf.offset, desc + 4);
459       return;
460    }
461 
462    if (unlikely(sview->dcc_incompatible)) {
463       if (vi_dcc_enabled(tex, view->u.tex.first_level))
464          if (!si_texture_disable_dcc(sctx, tex))
465             si_decompress_dcc(sctx, tex);
466 
467       sview->dcc_incompatible = false;
468    }
469 
470    bool is_separate_stencil = tex->db_compatible && sview->is_stencil_sampler;
471 
472    memcpy(desc, sview->state, 8 * 4);
473    si_set_mutable_tex_desc_fields(sctx->screen, tex, sview->base_level_info, sview->base_level,
474                                   sview->base.u.tex.first_level, sview->block_width,
475                                   is_separate_stencil, 0, desc);
476 
477    if (tex->surface.fmask_size) {
478       memcpy(desc + 8, sview->fmask_state, 8 * 4);
479    } else {
480       /* Disable FMASK and bind sampler state in [12:15]. */
481       memcpy(desc + 8, null_texture_descriptor, 4 * 4);
482 
483       if (sstate)
484          si_set_sampler_state_desc(sstate, sview, tex, desc + 12);
485    }
486 }
487 
color_needs_decompression(struct si_texture * tex)488 static bool color_needs_decompression(struct si_texture *tex)
489 {
490    if (tex->is_depth)
491       return false;
492 
493    return tex->surface.fmask_size ||
494           (tex->dirty_level_mask && (tex->cmask_buffer || tex->surface.meta_offset));
495 }
496 
depth_needs_decompression(struct si_texture * tex,bool is_stencil)497 static bool depth_needs_decompression(struct si_texture *tex, bool is_stencil)
498 {
499    /* If the depth/stencil texture is TC-compatible, no decompression
500     * will be done. The decompression function will only flush DB caches
501     * to make it coherent with shaders. That's necessary because the driver
502     * doesn't flush DB caches in any other case.
503     */
504    return tex->db_compatible && (tex->dirty_level_mask || (is_stencil && tex->stencil_dirty_level_mask));
505 }
506 
si_reset_sampler_view_slot(struct si_samplers * samplers,unsigned slot,uint32_t * restrict desc)507 static void si_reset_sampler_view_slot(struct si_samplers *samplers, unsigned slot,
508                                        uint32_t * restrict desc)
509 {
510    pipe_sampler_view_reference(&samplers->views[slot], NULL);
511    memcpy(desc, null_texture_descriptor, 8 * 4);
512    /* Only clear the lower dwords of FMASK. */
513    memcpy(desc + 8, null_texture_descriptor, 4 * 4);
514    /* Re-set the sampler state if we are transitioning from FMASK. */
515    if (samplers->sampler_states[slot])
516       si_set_sampler_state_desc(samplers->sampler_states[slot], NULL, NULL, desc + 12);
517 }
518 
si_set_sampler_views(struct si_context * sctx,unsigned shader,unsigned start_slot,unsigned count,unsigned unbind_num_trailing_slots,bool take_ownership,struct pipe_sampler_view ** views,bool disallow_early_out)519 static void si_set_sampler_views(struct si_context *sctx, unsigned shader,
520                                 unsigned start_slot, unsigned count,
521                                 unsigned unbind_num_trailing_slots,
522                                 bool take_ownership, struct pipe_sampler_view **views,
523                                 bool disallow_early_out)
524 {
525    struct si_samplers *samplers = &sctx->samplers[shader];
526    struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
527    uint32_t unbound_mask = 0;
528 
529    if (views) {
530       for (unsigned i = 0; i < count; i++) {
531          unsigned slot = start_slot + i;
532          struct si_sampler_view *sview = (struct si_sampler_view *)views[i];
533          unsigned desc_slot = si_get_sampler_slot(slot);
534          /* restrict decreases overhead of si_set_sampler_view_desc ~8x. */
535          uint32_t *restrict desc = descs->list + desc_slot * 16;
536 
537          if (samplers->views[slot] == &sview->base && !disallow_early_out) {
538             if (take_ownership) {
539                struct pipe_sampler_view *view = views[i];
540                pipe_sampler_view_reference(&view, NULL);
541             }
542             continue;
543          }
544 
545          if (sview) {
546             struct si_texture *tex = (struct si_texture *)sview->base.texture;
547 
548             si_set_sampler_view_desc(sctx, sview, samplers->sampler_states[slot], desc);
549 
550             if (tex->buffer.b.b.target == PIPE_BUFFER) {
551                tex->buffer.bind_history |= SI_BIND_SAMPLER_BUFFER(shader);
552                samplers->needs_depth_decompress_mask &= ~(1u << slot);
553                samplers->needs_color_decompress_mask &= ~(1u << slot);
554             } else {
555                if (tex->is_depth) {
556                   samplers->has_depth_tex_mask |= 1u << slot;
557                   samplers->needs_color_decompress_mask &= ~(1u << slot);
558 
559                   if (depth_needs_decompression(tex, sview->is_stencil_sampler)) {
560                      samplers->needs_depth_decompress_mask |= 1u << slot;
561                   } else {
562                      samplers->needs_depth_decompress_mask &= ~(1u << slot);
563                   }
564                } else {
565                   samplers->has_depth_tex_mask &= ~(1u << slot);
566                   samplers->needs_depth_decompress_mask &= ~(1u << slot);
567 
568                   if (color_needs_decompression(tex)) {
569                      samplers->needs_color_decompress_mask |= 1u << slot;
570                   } else {
571                      samplers->needs_color_decompress_mask &= ~(1u << slot);
572                   }
573                }
574 
575                if (vi_dcc_enabled(tex, sview->base.u.tex.first_level) &&
576                    p_atomic_read(&tex->framebuffers_bound))
577                   sctx->need_check_render_feedback = true;
578             }
579 
580             if (take_ownership) {
581                pipe_sampler_view_reference(&samplers->views[slot], NULL);
582                samplers->views[slot] = &sview->base;
583             } else {
584                pipe_sampler_view_reference(&samplers->views[slot], &sview->base);
585             }
586             samplers->enabled_mask |= 1u << slot;
587 
588             /* Since this can flush, it must be done after enabled_mask is
589              * updated. */
590             si_sampler_view_add_buffer(sctx, &tex->buffer.b.b, RADEON_USAGE_READ,
591                                        sview->is_stencil_sampler, true);
592          } else {
593             si_reset_sampler_view_slot(samplers, slot, desc);
594             unbound_mask |= 1u << slot;
595          }
596       }
597    } else {
598       unbind_num_trailing_slots += count;
599       count = 0;
600    }
601 
602    for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
603       unsigned slot = start_slot + count + i;
604       unsigned desc_slot = si_get_sampler_slot(slot);
605       uint32_t * restrict desc = descs->list + desc_slot * 16;
606 
607       if (samplers->views[slot])
608          si_reset_sampler_view_slot(samplers, slot, desc);
609    }
610 
611    unbound_mask |= BITFIELD_RANGE(start_slot + count, unbind_num_trailing_slots);
612    samplers->enabled_mask &= ~unbound_mask;
613    samplers->has_depth_tex_mask &= ~unbound_mask;
614    samplers->needs_depth_decompress_mask &= ~unbound_mask;
615    samplers->needs_color_decompress_mask &= ~unbound_mask;
616 
617    sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
618 }
619 
si_update_shader_needs_decompress_mask(struct si_context * sctx,unsigned shader)620 static void si_update_shader_needs_decompress_mask(struct si_context *sctx, unsigned shader)
621 {
622    struct si_samplers *samplers = &sctx->samplers[shader];
623    unsigned shader_bit = 1 << shader;
624 
625    if (samplers->needs_depth_decompress_mask || samplers->needs_color_decompress_mask ||
626        sctx->images[shader].needs_color_decompress_mask)
627       sctx->shader_needs_decompress_mask |= shader_bit;
628    else
629       sctx->shader_needs_decompress_mask &= ~shader_bit;
630 
631    if (samplers->has_depth_tex_mask)
632       sctx->shader_has_depth_tex |= shader_bit;
633    else
634       sctx->shader_has_depth_tex &= ~shader_bit;
635 }
636 
si_pipe_set_sampler_views(struct pipe_context * ctx,enum pipe_shader_type shader,unsigned start,unsigned count,unsigned unbind_num_trailing_slots,bool take_ownership,struct pipe_sampler_view ** views)637 static void si_pipe_set_sampler_views(struct pipe_context *ctx, enum pipe_shader_type shader,
638                                       unsigned start, unsigned count,
639                                       unsigned unbind_num_trailing_slots,
640                                       bool take_ownership, struct pipe_sampler_view **views)
641 {
642    struct si_context *sctx = (struct si_context *)ctx;
643 
644    if ((!count && !unbind_num_trailing_slots) || shader >= SI_NUM_SHADERS)
645       return;
646 
647    si_set_sampler_views(sctx, shader, start, count, unbind_num_trailing_slots,
648                         take_ownership, views, false);
649    si_update_shader_needs_decompress_mask(sctx, shader);
650 }
651 
si_samplers_update_needs_color_decompress_mask(struct si_samplers * samplers)652 static void si_samplers_update_needs_color_decompress_mask(struct si_samplers *samplers)
653 {
654    unsigned mask = samplers->enabled_mask;
655 
656    while (mask) {
657       int i = u_bit_scan(&mask);
658       struct pipe_resource *res = samplers->views[i]->texture;
659 
660       if (res && res->target != PIPE_BUFFER) {
661          struct si_texture *tex = (struct si_texture *)res;
662 
663          if (color_needs_decompression(tex)) {
664             samplers->needs_color_decompress_mask |= 1u << i;
665          } else {
666             samplers->needs_color_decompress_mask &= ~(1u << i);
667          }
668       }
669    }
670 }
671 
672 /* IMAGE VIEWS */
673 
si_release_image_views(struct si_images * images)674 static void si_release_image_views(struct si_images *images)
675 {
676    unsigned i;
677 
678    for (i = 0; i < SI_NUM_IMAGES; ++i) {
679       struct pipe_image_view *view = &images->views[i];
680 
681       pipe_resource_reference(&view->resource, NULL);
682    }
683 }
684 
si_image_views_begin_new_cs(struct si_context * sctx,struct si_images * images)685 static void si_image_views_begin_new_cs(struct si_context *sctx, struct si_images *images)
686 {
687    uint mask = images->enabled_mask;
688 
689    /* Add buffers to the CS. */
690    while (mask) {
691       int i = u_bit_scan(&mask);
692       struct pipe_image_view *view = &images->views[i];
693 
694       assert(view->resource);
695 
696       si_sampler_view_add_buffer(sctx, view->resource, RADEON_USAGE_READWRITE, false, false);
697    }
698 }
699 
si_image_views_check_encrypted(struct si_context * sctx,struct si_images * images,unsigned images_declared)700 static bool si_image_views_check_encrypted(struct si_context *sctx, struct si_images *images,
701                                            unsigned images_declared)
702 {
703    uint mask = images->enabled_mask & images_declared;
704 
705    while (mask) {
706       int i = u_bit_scan(&mask);
707       struct pipe_image_view *view = &images->views[i];
708 
709       assert(view->resource);
710 
711       struct si_texture *tex = (struct si_texture *)view->resource;
712       if (tex->buffer.flags & RADEON_FLAG_ENCRYPTED)
713          return true;
714    }
715    return false;
716 }
717 
si_disable_shader_image(struct si_context * ctx,unsigned shader,unsigned slot)718 static void si_disable_shader_image(struct si_context *ctx, unsigned shader, unsigned slot)
719 {
720    struct si_images *images = &ctx->images[shader];
721 
722    if (images->enabled_mask & (1u << slot)) {
723       struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
724       unsigned desc_slot = si_get_image_slot(slot);
725 
726       pipe_resource_reference(&images->views[slot].resource, NULL);
727       images->needs_color_decompress_mask &= ~(1 << slot);
728 
729       memcpy(descs->list + desc_slot * 8, null_image_descriptor, 8 * 4);
730       images->enabled_mask &= ~(1u << slot);
731       images->display_dcc_store_mask &= ~(1u << slot);
732       ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
733    }
734 }
735 
si_mark_image_range_valid(const struct pipe_image_view * view)736 static void si_mark_image_range_valid(const struct pipe_image_view *view)
737 {
738    struct si_resource *res = si_resource(view->resource);
739 
740    if (res->b.b.target != PIPE_BUFFER)
741       return;
742 
743    util_range_add(&res->b.b, &res->valid_buffer_range, view->u.buf.offset,
744                   view->u.buf.offset + view->u.buf.size);
745 }
746 
si_set_shader_image_desc(struct si_context * ctx,const struct pipe_image_view * view,bool skip_decompress,uint32_t * desc,uint32_t * fmask_desc)747 static void si_set_shader_image_desc(struct si_context *ctx, const struct pipe_image_view *view,
748                                      bool skip_decompress, uint32_t *desc, uint32_t *fmask_desc)
749 {
750    struct si_screen *screen = ctx->screen;
751    struct si_resource *res;
752 
753    res = si_resource(view->resource);
754 
755    if (res->b.b.target == PIPE_BUFFER) {
756       if (view->access & PIPE_IMAGE_ACCESS_WRITE)
757          si_mark_image_range_valid(view);
758 
759       si_make_buffer_descriptor(screen, res, view->format, view->u.buf.offset, view->u.buf.size,
760                                 desc);
761       si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
762    } else {
763       static const unsigned char swizzle[4] = {0, 1, 2, 3};
764       struct si_texture *tex = (struct si_texture *)res;
765       unsigned level = view->u.tex.level;
766       unsigned width, height, depth, hw_level;
767       bool uses_dcc = vi_dcc_enabled(tex, level);
768       unsigned access = view->access;
769 
770       if (uses_dcc && screen->always_allow_dcc_stores)
771          access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;
772 
773       assert(!tex->is_depth);
774       assert(fmask_desc || tex->surface.fmask_offset == 0);
775 
776       if (uses_dcc && !skip_decompress &&
777           !(access & SI_IMAGE_ACCESS_DCC_OFF) &&
778           ((!(access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE) && (access & PIPE_IMAGE_ACCESS_WRITE)) ||
779            !vi_dcc_formats_compatible(screen, res->b.b.format, view->format))) {
780          /* If DCC can't be disabled, at least decompress it.
781           * The decompression is relatively cheap if the surface
782           * has been decompressed already.
783           */
784          if (!si_texture_disable_dcc(ctx, tex))
785             si_decompress_dcc(ctx, tex);
786       }
787 
788       if (ctx->chip_class >= GFX9) {
789          /* Always set the base address. The swizzle modes don't
790           * allow setting mipmap level offsets as the base.
791           */
792          width = res->b.b.width0;
793          height = res->b.b.height0;
794          depth = res->b.b.depth0;
795          hw_level = level;
796       } else {
797          /* Always force the base level to the selected level.
798           *
799           * This is required for 3D textures, where otherwise
800           * selecting a single slice for non-layered bindings
801           * fails. It doesn't hurt the other targets.
802           */
803          width = u_minify(res->b.b.width0, level);
804          height = u_minify(res->b.b.height0, level);
805          depth = u_minify(res->b.b.depth0, level);
806          hw_level = 0;
807       }
808 
809       screen->make_texture_descriptor(
810          screen, tex, false, res->b.b.target, view->format, swizzle, hw_level, hw_level,
811          view->u.tex.first_layer, view->u.tex.last_layer, width, height, depth, desc, fmask_desc);
812       si_set_mutable_tex_desc_fields(screen, tex, &tex->surface.u.legacy.level[level], level, level,
813                                      util_format_get_blockwidth(view->format),
814                                      false, access, desc);
815    }
816 }
817 
si_set_shader_image(struct si_context * ctx,unsigned shader,unsigned slot,const struct pipe_image_view * view,bool skip_decompress)818 static void si_set_shader_image(struct si_context *ctx, unsigned shader, unsigned slot,
819                                 const struct pipe_image_view *view, bool skip_decompress)
820 {
821    struct si_images *images = &ctx->images[shader];
822    struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
823    struct si_resource *res;
824 
825    if (!view || !view->resource) {
826       si_disable_shader_image(ctx, shader, slot);
827       return;
828    }
829 
830    res = si_resource(view->resource);
831 
832    si_set_shader_image_desc(ctx, view, skip_decompress, descs->list + si_get_image_slot(slot) * 8,
833                             descs->list + si_get_image_slot(slot + SI_NUM_IMAGES) * 8);
834 
835    if (&images->views[slot] != view)
836       util_copy_image_view(&images->views[slot], view);
837 
838    if (res->b.b.target == PIPE_BUFFER) {
839       images->needs_color_decompress_mask &= ~(1 << slot);
840       images->display_dcc_store_mask &= ~(1u << slot);
841       res->bind_history |= SI_BIND_IMAGE_BUFFER(shader);
842    } else {
843       struct si_texture *tex = (struct si_texture *)res;
844       unsigned level = view->u.tex.level;
845 
846       if (color_needs_decompression(tex)) {
847          images->needs_color_decompress_mask |= 1 << slot;
848       } else {
849          images->needs_color_decompress_mask &= ~(1 << slot);
850       }
851 
852       if (tex->surface.display_dcc_offset && view->access & PIPE_IMAGE_ACCESS_WRITE) {
853          images->display_dcc_store_mask |= 1u << slot;
854 
855          /* Set displayable_dcc_dirty for non-compute stages conservatively (before draw calls). */
856          if (shader != PIPE_SHADER_COMPUTE)
857             tex->displayable_dcc_dirty = true;
858       } else {
859          images->display_dcc_store_mask &= ~(1u << slot);
860       }
861 
862       if (vi_dcc_enabled(tex, level) && p_atomic_read(&tex->framebuffers_bound))
863          ctx->need_check_render_feedback = true;
864    }
865 
866    images->enabled_mask |= 1u << slot;
867    ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
868 
869    /* Since this can flush, it must be done after enabled_mask is updated. */
870    si_sampler_view_add_buffer(
871       ctx, &res->b.b,
872       (view->access & PIPE_IMAGE_ACCESS_WRITE) ? RADEON_USAGE_READWRITE : RADEON_USAGE_READ, false,
873       true);
874 }
875 
si_set_shader_images(struct pipe_context * pipe,enum pipe_shader_type shader,unsigned start_slot,unsigned count,unsigned unbind_num_trailing_slots,const struct pipe_image_view * views)876 static void si_set_shader_images(struct pipe_context *pipe, enum pipe_shader_type shader,
877                                  unsigned start_slot, unsigned count,
878                                  unsigned unbind_num_trailing_slots,
879                                  const struct pipe_image_view *views)
880 {
881    struct si_context *ctx = (struct si_context *)pipe;
882    unsigned i, slot;
883 
884    assert(shader < SI_NUM_SHADERS);
885 
886    if (!count && !unbind_num_trailing_slots)
887       return;
888 
889    assert(start_slot + count + unbind_num_trailing_slots <= SI_NUM_IMAGES);
890 
891    if (views) {
892       for (i = 0, slot = start_slot; i < count; ++i, ++slot)
893          si_set_shader_image(ctx, shader, slot, &views[i], false);
894    } else {
895       for (i = 0, slot = start_slot; i < count; ++i, ++slot)
896          si_set_shader_image(ctx, shader, slot, NULL, false);
897    }
898 
899    for (i = 0; i < unbind_num_trailing_slots; ++i, ++slot)
900       si_set_shader_image(ctx, shader, slot, NULL, false);
901 
902    if (shader == PIPE_SHADER_COMPUTE &&
903        ctx->cs_shader_state.program &&
904        start_slot < ctx->cs_shader_state.program->sel.cs_num_images_in_user_sgprs)
905       ctx->compute_image_sgprs_dirty = true;
906 
907    si_update_shader_needs_decompress_mask(ctx, shader);
908 }
909 
si_images_update_needs_color_decompress_mask(struct si_images * images)910 static void si_images_update_needs_color_decompress_mask(struct si_images *images)
911 {
912    unsigned mask = images->enabled_mask;
913 
914    while (mask) {
915       int i = u_bit_scan(&mask);
916       struct pipe_resource *res = images->views[i].resource;
917 
918       if (res && res->target != PIPE_BUFFER) {
919          struct si_texture *tex = (struct si_texture *)res;
920 
921          if (color_needs_decompression(tex)) {
922             images->needs_color_decompress_mask |= 1 << i;
923          } else {
924             images->needs_color_decompress_mask &= ~(1 << i);
925          }
926       }
927    }
928 }
929 
si_update_ps_colorbuf0_slot(struct si_context * sctx)930 void si_update_ps_colorbuf0_slot(struct si_context *sctx)
931 {
932    struct si_buffer_resources *buffers = &sctx->internal_bindings;
933    struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_INTERNAL];
934    unsigned slot = SI_PS_IMAGE_COLORBUF0;
935    struct pipe_surface *surf = NULL;
936 
937    /* si_texture_disable_dcc can get us here again. */
938    if (sctx->in_update_ps_colorbuf0_slot) {
939       assert(!sctx->ps_uses_fbfetch || sctx->framebuffer.state.cbufs[0]);
940       return;
941    }
942    sctx->in_update_ps_colorbuf0_slot = true;
943 
944    /* See whether FBFETCH is used and color buffer 0 is set. */
945    if (sctx->shader.ps.cso && sctx->shader.ps.cso->info.base.fs.uses_fbfetch_output &&
946        sctx->framebuffer.state.nr_cbufs && sctx->framebuffer.state.cbufs[0])
947       surf = sctx->framebuffer.state.cbufs[0];
948 
949    /* Return if FBFETCH transitions from disabled to disabled. */
950    if (!buffers->buffers[slot] && !surf) {
951       assert(!sctx->ps_uses_fbfetch);
952       sctx->in_update_ps_colorbuf0_slot = false;
953       return;
954    }
955 
956    sctx->ps_uses_fbfetch = surf != NULL;
957    si_update_ps_iter_samples(sctx);
958 
959    if (surf) {
960       struct si_texture *tex = (struct si_texture *)surf->texture;
961       struct pipe_image_view view = {0};
962 
963       assert(tex);
964       assert(!tex->is_depth);
965 
966       /* Disable DCC, because the texture is used as both a sampler
967        * and color buffer.
968        */
969       si_texture_disable_dcc(sctx, tex);
970 
971       if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
972          /* Disable CMASK. */
973          assert(tex->cmask_buffer != &tex->buffer);
974          si_eliminate_fast_color_clear(sctx, tex, NULL);
975          si_texture_discard_cmask(sctx->screen, tex);
976       }
977 
978       view.resource = surf->texture;
979       view.format = surf->format;
980       view.access = PIPE_IMAGE_ACCESS_READ;
981       view.u.tex.first_layer = surf->u.tex.first_layer;
982       view.u.tex.last_layer = surf->u.tex.last_layer;
983       view.u.tex.level = surf->u.tex.level;
984 
985       /* Set the descriptor. */
986       uint32_t *desc = descs->list + slot * 4;
987       memset(desc, 0, 16 * 4);
988       si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
989 
990       pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
991       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, &tex->buffer,
992                                 RADEON_USAGE_READ | RADEON_PRIO_SHADER_RW_IMAGE);
993       buffers->enabled_mask |= 1llu << slot;
994    } else {
995       /* Clear the descriptor. */
996       memset(descs->list + slot * 4, 0, 8 * 4);
997       pipe_resource_reference(&buffers->buffers[slot], NULL);
998       buffers->enabled_mask &= ~(1llu << slot);
999    }
1000 
1001    sctx->descriptors_dirty |= 1u << SI_DESCS_INTERNAL;
1002    sctx->in_update_ps_colorbuf0_slot = false;
1003 }
1004 
1005 /* SAMPLER STATES */
1006 
si_bind_sampler_states(struct pipe_context * ctx,enum pipe_shader_type shader,unsigned start,unsigned count,void ** states)1007 static void si_bind_sampler_states(struct pipe_context *ctx, enum pipe_shader_type shader,
1008                                    unsigned start, unsigned count, void **states)
1009 {
1010    struct si_context *sctx = (struct si_context *)ctx;
1011    struct si_samplers *samplers = &sctx->samplers[shader];
1012    struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
1013    struct si_sampler_state **sstates = (struct si_sampler_state **)states;
1014    int i;
1015 
1016    if (!count || shader >= SI_NUM_SHADERS || !sstates)
1017       return;
1018 
1019    for (i = 0; i < count; i++) {
1020       unsigned slot = start + i;
1021       unsigned desc_slot = si_get_sampler_slot(slot);
1022 
1023       if (!sstates[i] || sstates[i] == samplers->sampler_states[slot])
1024          continue;
1025 
1026 #ifndef NDEBUG
1027       assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
1028 #endif
1029       samplers->sampler_states[slot] = sstates[i];
1030 
1031       /* If FMASK is bound, don't overwrite it.
1032        * The sampler state will be set after FMASK is unbound.
1033        */
1034       struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[slot];
1035 
1036       struct si_texture *tex = NULL;
1037 
1038       if (sview && sview->base.texture && sview->base.texture->target != PIPE_BUFFER)
1039          tex = (struct si_texture *)sview->base.texture;
1040 
1041       if (tex && tex->surface.fmask_size)
1042          continue;
1043 
1044       si_set_sampler_state_desc(sstates[i], sview, tex, desc->list + desc_slot * 16 + 12);
1045 
1046       sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
1047    }
1048 }
1049 
1050 /* BUFFER RESOURCES */
1051 
si_init_buffer_resources(struct si_context * sctx,struct si_buffer_resources * buffers,struct si_descriptors * descs,unsigned num_buffers,short shader_userdata_rel_index,unsigned priority,unsigned priority_constbuf)1052 static void si_init_buffer_resources(struct si_context *sctx,
1053                                      struct si_buffer_resources *buffers,
1054                                      struct si_descriptors *descs, unsigned num_buffers,
1055                                      short shader_userdata_rel_index,
1056                                      unsigned priority,
1057                                      unsigned priority_constbuf)
1058 {
1059    buffers->priority = priority;
1060    buffers->priority_constbuf = priority_constbuf;
1061    buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource *));
1062    buffers->offsets = CALLOC(num_buffers, sizeof(buffers->offsets[0]));
1063 
1064    si_init_descriptors(descs, shader_userdata_rel_index, 4, num_buffers);
1065 
1066    /* Initialize buffer descriptors, so that we don't have to do it at bind time. */
1067    for (unsigned i = 0; i < num_buffers; i++) {
1068       uint32_t *desc = descs->list + i * 4;
1069 
1070       desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1071                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
1072 
1073       if (sctx->chip_class >= GFX10) {
1074          desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
1075                     S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
1076       } else {
1077          desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1078                     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1079       }
1080    }
1081 }
1082 
si_release_buffer_resources(struct si_buffer_resources * buffers,struct si_descriptors * descs)1083 static void si_release_buffer_resources(struct si_buffer_resources *buffers,
1084                                         struct si_descriptors *descs)
1085 {
1086    int i;
1087 
1088    for (i = 0; i < descs->num_elements; i++) {
1089       pipe_resource_reference(&buffers->buffers[i], NULL);
1090    }
1091 
1092    FREE(buffers->buffers);
1093    FREE(buffers->offsets);
1094 }
1095 
si_buffer_resources_begin_new_cs(struct si_context * sctx,struct si_buffer_resources * buffers)1096 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
1097                                              struct si_buffer_resources *buffers)
1098 {
1099    uint64_t mask = buffers->enabled_mask;
1100 
1101    /* Add buffers to the CS. */
1102    while (mask) {
1103       int i = u_bit_scan64(&mask);
1104 
1105       radeon_add_to_buffer_list(
1106          sctx, &sctx->gfx_cs, si_resource(buffers->buffers[i]),
1107          (buffers->writable_mask & (1llu << i) ? RADEON_USAGE_READWRITE : RADEON_USAGE_READ) |
1108          (i < SI_NUM_SHADER_BUFFERS ? buffers->priority : buffers->priority_constbuf));
1109    }
1110 }
1111 
si_buffer_resources_check_encrypted(struct si_context * sctx,struct si_buffer_resources * buffers)1112 static bool si_buffer_resources_check_encrypted(struct si_context *sctx,
1113                                                 struct si_buffer_resources *buffers)
1114 {
1115    uint64_t mask = buffers->enabled_mask;
1116 
1117    while (mask) {
1118       int i = u_bit_scan64(&mask);
1119 
1120       if (si_resource(buffers->buffers[i])->flags & RADEON_FLAG_ENCRYPTED)
1121          return true;
1122    }
1123 
1124    return false;
1125 }
1126 
si_get_buffer_from_descriptors(struct si_buffer_resources * buffers,struct si_descriptors * descs,unsigned idx,struct pipe_resource ** buf,unsigned * offset,unsigned * size)1127 static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
1128                                            struct si_descriptors *descs, unsigned idx,
1129                                            struct pipe_resource **buf, unsigned *offset,
1130                                            unsigned *size)
1131 {
1132    pipe_resource_reference(buf, buffers->buffers[idx]);
1133    if (*buf) {
1134       struct si_resource *res = si_resource(*buf);
1135       const uint32_t *desc = descs->list + idx * 4;
1136       uint64_t va;
1137 
1138       *size = desc[2];
1139 
1140       assert(G_008F04_STRIDE(desc[1]) == 0);
1141       va = si_desc_extract_buffer_address(desc);
1142 
1143       assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
1144       *offset = va - res->gpu_address;
1145    }
1146 }
1147 
1148 /* VERTEX BUFFERS */
1149 
si_vertex_buffers_begin_new_cs(struct si_context * sctx)1150 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
1151 {
1152    int count = sctx->num_vertex_elements;
1153    int i;
1154 
1155    for (i = 0; i < count; i++) {
1156       int vb = sctx->vertex_elements->vertex_buffer_index[i];
1157 
1158       if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1159          continue;
1160       if (!sctx->vertex_buffer[vb].buffer.resource)
1161          continue;
1162 
1163       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs,
1164                                 si_resource(sctx->vertex_buffer[vb].buffer.resource),
1165                                 RADEON_USAGE_READ | RADEON_PRIO_VERTEX_BUFFER);
1166    }
1167 
1168    if (!sctx->vb_descriptors_buffer)
1169       return;
1170    radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->vb_descriptors_buffer,
1171                              RADEON_USAGE_READ | RADEON_PRIO_DESCRIPTORS);
1172 }
1173 
1174 /* CONSTANT BUFFERS */
1175 
si_const_and_shader_buffer_descriptors(struct si_context * sctx,unsigned shader)1176 static struct si_descriptors *si_const_and_shader_buffer_descriptors(struct si_context *sctx,
1177                                                                      unsigned shader)
1178 {
1179    return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
1180 }
1181 
si_upload_const_buffer(struct si_context * sctx,struct si_resource ** buf,const uint8_t * ptr,unsigned size,uint32_t * const_offset)1182 static void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf,
1183                                    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
1184 {
1185    void *tmp;
1186 
1187    u_upload_alloc(sctx->b.const_uploader, 0, size, si_optimal_tcc_alignment(sctx, size),
1188                   const_offset, (struct pipe_resource **)buf, &tmp);
1189    if (*buf)
1190       util_memcpy_cpu_to_le32(tmp, ptr, size);
1191 }
1192 
si_set_constant_buffer(struct si_context * sctx,struct si_buffer_resources * buffers,unsigned descriptors_idx,uint slot,bool take_ownership,const struct pipe_constant_buffer * input)1193 static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_resources *buffers,
1194                                    unsigned descriptors_idx, uint slot, bool take_ownership,
1195                                    const struct pipe_constant_buffer *input)
1196 {
1197    struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1198    assert(slot < descs->num_elements);
1199    pipe_resource_reference(&buffers->buffers[slot], NULL);
1200 
1201    /* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
1202     * with a NULL buffer). We need to use a dummy buffer instead. */
1203    if (sctx->chip_class == GFX7 && (!input || (!input->buffer && !input->user_buffer)))
1204       input = &sctx->null_const_buf;
1205 
1206    if (input && (input->buffer || input->user_buffer)) {
1207       struct pipe_resource *buffer = NULL;
1208       uint64_t va;
1209       unsigned buffer_offset;
1210 
1211       /* Upload the user buffer if needed. */
1212       if (input->user_buffer) {
1213          si_upload_const_buffer(sctx, (struct si_resource **)&buffer, input->user_buffer,
1214                                 input->buffer_size, &buffer_offset);
1215          if (!buffer) {
1216             /* Just unbind on failure. */
1217             si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, false, NULL);
1218             return;
1219          }
1220       } else {
1221          if (take_ownership) {
1222             buffer = input->buffer;
1223          } else {
1224             pipe_resource_reference(&buffer, input->buffer);
1225          }
1226          buffer_offset = input->buffer_offset;
1227       }
1228 
1229       va = si_resource(buffer)->gpu_address + buffer_offset;
1230 
1231       /* Set the descriptor. */
1232       uint32_t *desc = descs->list + slot * 4;
1233       desc[0] = va;
1234       desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(0);
1235       desc[2] = input->buffer_size;
1236 
1237       buffers->buffers[slot] = buffer;
1238       buffers->offsets[slot] = buffer_offset;
1239       radeon_add_to_gfx_buffer_list_check_mem(sctx, si_resource(buffer),
1240                                               RADEON_USAGE_READ | buffers->priority_constbuf, true);
1241       buffers->enabled_mask |= 1llu << slot;
1242    } else {
1243       /* Clear the descriptor. Only 3 dwords are cleared. The 4th dword is immutable. */
1244       memset(descs->list + slot * 4, 0, sizeof(uint32_t) * 3);
1245       buffers->enabled_mask &= ~(1llu << slot);
1246    }
1247 
1248    sctx->descriptors_dirty |= 1u << descriptors_idx;
1249 }
1250 
si_get_inline_uniform_state(union si_shader_key * key,enum pipe_shader_type shader,bool * inline_uniforms,uint32_t ** inlined_values)1251 void si_get_inline_uniform_state(union si_shader_key *key, enum pipe_shader_type shader,
1252                                  bool *inline_uniforms, uint32_t **inlined_values)
1253 {
1254    if (shader == PIPE_SHADER_FRAGMENT) {
1255       *inline_uniforms = key->ps.opt.inline_uniforms;
1256       *inlined_values = key->ps.opt.inlined_uniform_values;
1257    } else {
1258       *inline_uniforms = key->ge.opt.inline_uniforms;
1259       *inlined_values = key->ge.opt.inlined_uniform_values;
1260    }
1261 }
1262 
1263 void si_invalidate_inlinable_uniforms(struct si_context *sctx, enum pipe_shader_type shader)
1264 {
1265    if (shader == PIPE_SHADER_COMPUTE)
1266       return;
1267 
1268    bool inline_uniforms;
1269    uint32_t *inlined_values;
1270    si_get_inline_uniform_state(&sctx->shaders[shader].key, shader, &inline_uniforms, &inlined_values);
1271 
1272    if (inline_uniforms) {
1273       if (shader == PIPE_SHADER_FRAGMENT)
1274          sctx->shaders[shader].key.ps.opt.inline_uniforms = false;
1275       else
1276          sctx->shaders[shader].key.ge.opt.inline_uniforms = false;
1277 
1278       memset(inlined_values, 0, MAX_INLINABLE_UNIFORMS * 4);
1279       sctx->do_update_shaders = true;
1280    }
1281 }
1282 
1283 static void si_pipe_set_constant_buffer(struct pipe_context *ctx, enum pipe_shader_type shader,
1284                                         uint slot, bool take_ownership,
1285                                         const struct pipe_constant_buffer *input)
1286 {
1287    struct si_context *sctx = (struct si_context *)ctx;
1288 
1289    if (shader >= SI_NUM_SHADERS)
1290       return;
1291 
1292    if (input) {
1293       if (input->buffer) {
1294          if (slot == 0 &&
1295              !(si_resource(input->buffer)->flags & RADEON_FLAG_32BIT)) {
1296             assert(!"constant buffer 0 must have a 32-bit VM address, use const_uploader");
1297             return;
1298          }
1299          si_resource(input->buffer)->bind_history |= SI_BIND_CONSTANT_BUFFER(shader);
1300       }
1301 
1302       if (slot == 0)
1303          si_invalidate_inlinable_uniforms(sctx, shader);
1304    }
1305 
1306    slot = si_get_constbuf_slot(slot);
1307    si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
1308                           si_const_and_shader_buffer_descriptors_idx(shader), slot,
1309                           take_ownership, input);
1310 }
1311 
1312 static void si_set_inlinable_constants(struct pipe_context *ctx,
1313                                        enum pipe_shader_type shader,
1314                                        uint num_values, uint32_t *values)
1315 {
1316    struct si_context *sctx = (struct si_context *)ctx;
1317 
1318    if (shader == PIPE_SHADER_COMPUTE)
1319       return;
1320 
1321    bool inline_uniforms;
1322    uint32_t *inlined_values;
1323    si_get_inline_uniform_state(&sctx->shaders[shader].key, shader, &inline_uniforms, &inlined_values);
1324 
1325    if (!inline_uniforms) {
1326       /* It's the first time we set the constants. Always update shaders. */
1327       if (shader == PIPE_SHADER_FRAGMENT)
1328          sctx->shaders[shader].key.ps.opt.inline_uniforms = true;
1329       else
1330          sctx->shaders[shader].key.ge.opt.inline_uniforms = true;
1331 
1332       memcpy(inlined_values, values, num_values * 4);
1333       sctx->do_update_shaders = true;
1334       return;
1335    }
1336 
1337    /* We have already set inlinable constants for this shader. Update the shader only if
1338     * the constants are being changed so as not to update shaders needlessly.
1339     */
1340    if (memcmp(inlined_values, values, num_values * 4)) {
1341       memcpy(inlined_values, values, num_values * 4);
1342       sctx->do_update_shaders = true;
1343    }
1344 }
1345 
1346 void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader, uint slot,
1347                                  struct pipe_constant_buffer *cbuf)
1348 {
1349    cbuf->user_buffer = NULL;
1350    si_get_buffer_from_descriptors(
1351       &sctx->const_and_shader_buffers[shader], si_const_and_shader_buffer_descriptors(sctx, shader),
1352       si_get_constbuf_slot(slot), &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
1353 }
1354 
1355 /* SHADER BUFFERS */
1356 
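/* Bind or unbind one shader buffer (SSBO) in a 4-dword descriptor slot.
 * Writable bindings are tracked in writable_mask, and the bound range is
 * added to the buffer's valid range.
 */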
1357 static void si_set_shader_buffer(struct si_context *sctx, struct si_buffer_resources *buffers,
1358                                  unsigned descriptors_idx, uint slot,
1359                                  const struct pipe_shader_buffer *sbuffer, bool writable,
1360                                  unsigned priority)
1361 {
1362    struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1363    uint32_t *desc = descs->list + slot * 4;
1364 
1365    if (!sbuffer || !sbuffer->buffer) {
1366       pipe_resource_reference(&buffers->buffers[slot], NULL);
1367       /* Clear the descriptor. Only 3 dwords are cleared. The 4th dword is immutable. */
1368       memset(desc, 0, sizeof(uint32_t) * 3);
1369       buffers->enabled_mask &= ~(1llu << slot);
1370       buffers->writable_mask &= ~(1llu << slot);
1371       sctx->descriptors_dirty |= 1u << descriptors_idx;
1372       return;
1373    }
1374 
1375    struct si_resource *buf = si_resource(sbuffer->buffer);
1376    uint64_t va = buf->gpu_address + sbuffer->buffer_offset;
1377 
1378    desc[0] = va;
1379    desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(0);
1380    desc[2] = sbuffer->buffer_size;
1381 
1382    pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
1383    buffers->offsets[slot] = sbuffer->buffer_offset;
1384    radeon_add_to_gfx_buffer_list_check_mem(
1385       sctx, buf, (writable ? RADEON_USAGE_READWRITE : RADEON_USAGE_READ) | priority, true);
1386    if (writable)
1387       buffers->writable_mask |= 1llu << slot;
1388    else
1389       buffers->writable_mask &= ~(1llu << slot);
1390 
1391    buffers->enabled_mask |= 1llu << slot;
1392    sctx->descriptors_dirty |= 1u << descriptors_idx;
1393 
1394    util_range_add(&buf->b.b, &buf->valid_buffer_range, sbuffer->buffer_offset,
1395                   sbuffer->buffer_offset + sbuffer->buffer_size);
1396 }
1397 
1398 void si_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type shader,
1399                            unsigned start_slot, unsigned count,
1400                            const struct pipe_shader_buffer *sbuffers,
1401                            unsigned writable_bitmask, bool internal_blit)
1402 {
1403    struct si_context *sctx = (struct si_context *)ctx;
1404    struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1405    unsigned descriptors_idx = si_const_and_shader_buffer_descriptors_idx(shader);
1406    unsigned i;
1407 
1408    assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
1409 
1410    if (shader == PIPE_SHADER_COMPUTE &&
1411        sctx->cs_shader_state.program &&
1412        start_slot < sctx->cs_shader_state.program->sel.cs_num_shaderbufs_in_user_sgprs)
1413       sctx->compute_shaderbuf_sgprs_dirty = true;
1414 
1415    for (i = 0; i < count; ++i) {
1416       const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
1417       unsigned slot = si_get_shaderbuf_slot(start_slot + i);
1418 
1419       /* Don't track bind history for internal blits, such as clear_buffer and copy_buffer,
1420        * to prevent unnecessary synchronization before compute blits later.
1421        */
1422       if (!internal_blit && sbuffer && sbuffer->buffer)
1423          si_resource(sbuffer->buffer)->bind_history |= SI_BIND_SHADER_BUFFER(shader);
1424 
1425       si_set_shader_buffer(sctx, buffers, descriptors_idx, slot, sbuffer,
1426                            !!(writable_bitmask & (1u << i)), buffers->priority);
1427    }
1428 }
1429 
1430 static void si_pipe_set_shader_buffers(struct pipe_context *ctx, enum pipe_shader_type shader,
1431                                        unsigned start_slot, unsigned count,
1432                                        const struct pipe_shader_buffer *sbuffers,
1433                                        unsigned writable_bitmask)
1434 {
1435    si_set_shader_buffers(ctx, shader, start_slot, count, sbuffers, writable_bitmask, false);
1436 }
1437 
1438 void si_get_shader_buffers(struct si_context *sctx, enum pipe_shader_type shader, uint start_slot,
1439                            uint count, struct pipe_shader_buffer *sbuf)
1440 {
1441    struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
1442    struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
1443 
1444    for (unsigned i = 0; i < count; ++i) {
1445       si_get_buffer_from_descriptors(buffers, descs, si_get_shaderbuf_slot(start_slot + i),
1446                                      &sbuf[i].buffer, &sbuf[i].buffer_offset, &sbuf[i].buffer_size);
1447    }
1448 }
1449 
1450 /* RING BUFFERS */
1451 
1452 void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
1453                                   const struct pipe_constant_buffer *input)
1454 {
1455    si_set_constant_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, false, input);
1456 }
1457 
1458 void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
1459                                    const struct pipe_shader_buffer *sbuffer)
1460 {
1461    si_set_shader_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, sbuffer, true,
1462                         RADEON_PRIO_SHADER_RW_BUFFER);
1463 }
1464 
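/* Bind an internal ring buffer. element_size and index_stride are given in
 * bytes and converted to the hardware encodings below; on GFX8 and newer,
 * num_records is converted to a byte count when a stride is set.
 */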
1465 void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource *buffer,
1466                         unsigned stride, unsigned num_records, bool add_tid, bool swizzle,
1467                         unsigned element_size, unsigned index_stride, uint64_t offset)
1468 {
1469    struct si_buffer_resources *buffers = &sctx->internal_bindings;
1470    struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_INTERNAL];
1471 
1472    /* The stride field in the resource descriptor has 14 bits */
1473    assert(stride < (1 << 14));
1474 
1475    assert(slot < descs->num_elements);
1476    pipe_resource_reference(&buffers->buffers[slot], NULL);
1477 
1478    if (buffer) {
1479       uint64_t va;
1480 
1481       va = si_resource(buffer)->gpu_address + offset;
1482 
1483       switch (element_size) {
1484       default:
1485          assert(!"Unsupported ring buffer element size");
1486       case 0:
1487       case 2:
1488          element_size = 0;
1489          break;
1490       case 4:
1491          element_size = 1;
1492          break;
1493       case 8:
1494          element_size = 2;
1495          break;
1496       case 16:
1497          element_size = 3;
1498          break;
1499       }
1500 
1501       switch (index_stride) {
1502       default:
1503          assert(!"Unsupported ring buffer index stride");
1504       case 0:
1505       case 8:
1506          index_stride = 0;
1507          break;
1508       case 16:
1509          index_stride = 1;
1510          break;
1511       case 32:
1512          index_stride = 2;
1513          break;
1514       case 64:
1515          index_stride = 3;
1516          break;
1517       }
1518 
1519       if (sctx->chip_class >= GFX8 && stride)
1520          num_records *= stride;
1521 
1522       /* Set the descriptor. */
1523       uint32_t *desc = descs->list + slot * 4;
1524       desc[0] = va;
1525       desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride) |
1526                 S_008F04_SWIZZLE_ENABLE(swizzle);
1527       desc[2] = num_records;
1528       desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1529                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1530                 S_008F0C_INDEX_STRIDE(index_stride) | S_008F0C_ADD_TID_ENABLE(add_tid);
1531 
1532       if (sctx->chip_class >= GFX9)
1533          assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
1534       else
1535          desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
1536 
1537       if (sctx->chip_class >= GFX10) {
1538          desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
1539                     S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
1540       } else {
1541          desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1542                     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1543       }
1544 
1545       pipe_resource_reference(&buffers->buffers[slot], buffer);
1546       radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(buffer),
1547                                 RADEON_USAGE_READWRITE | buffers->priority);
1548       buffers->enabled_mask |= 1llu << slot;
1549    } else {
1550       /* Clear the descriptor. */
1551       memset(descs->list + slot * 4, 0, sizeof(uint32_t) * 4);
1552       buffers->enabled_mask &= ~(1llu << slot);
1553    }
1554 
1555    sctx->descriptors_dirty |= 1u << SI_DESCS_INTERNAL;
1556 }
1557 
1558 /* INTERNAL CONST BUFFERS */
1559 
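/* Upload the 32x32 polygon stipple pattern as a user constant buffer
 * (SI_PS_CONST_POLY_STIPPLE). Each row is bit-reversed before the upload.
 */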
1560 static void si_set_polygon_stipple(struct pipe_context *ctx, const struct pipe_poly_stipple *state)
1561 {
1562    struct si_context *sctx = (struct si_context *)ctx;
1563    struct pipe_constant_buffer cb = {};
1564    unsigned stipple[32];
1565    int i;
1566 
1567    for (i = 0; i < 32; i++)
1568       stipple[i] = util_bitreverse(state->stipple[i]);
1569 
1570    cb.user_buffer = stipple;
1571    cb.buffer_size = sizeof(stipple);
1572 
1573    si_set_internal_const_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1574 }
1575 
1576 /* TEXTURE METADATA ENABLE/DISABLE */
1577 
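/* Rebuild the lists of resident texture and image handles that reference
 * color surfaces needing decompression.
 */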
1578 static void si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
1579 {
1580    util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
1581    util_dynarray_clear(&sctx->resident_img_needs_color_decompress);
1582 
1583    util_dynarray_foreach (&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle) {
1584       struct pipe_resource *res = (*tex_handle)->view->texture;
1585       struct si_texture *tex;
1586 
1587       if (!res || res->target == PIPE_BUFFER)
1588          continue;
1589 
1590       tex = (struct si_texture *)res;
1591       if (!color_needs_decompression(tex))
1592          continue;
1593 
1594       util_dynarray_append(&sctx->resident_tex_needs_color_decompress, struct si_texture_handle *,
1595                            *tex_handle);
1596    }
1597 
1598    util_dynarray_foreach (&sctx->resident_img_handles, struct si_image_handle *, img_handle) {
1599       struct pipe_image_view *view = &(*img_handle)->view;
1600       struct pipe_resource *res = view->resource;
1601       struct si_texture *tex;
1602 
1603       if (!res || res->target == PIPE_BUFFER)
1604          continue;
1605 
1606       tex = (struct si_texture *)res;
1607       if (!color_needs_decompression(tex))
1608          continue;
1609 
1610       util_dynarray_append(&sctx->resident_img_needs_color_decompress, struct si_image_handle *,
1611                            *img_handle);
1612    }
1613 }
1614 
1615 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1616  * while the texture is bound, possibly by a different context. In that case,
1617  * call this function to update needs_*_decompress_masks.
1618  */
1619 void si_update_needs_color_decompress_masks(struct si_context *sctx)
1620 {
1621    for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1622       si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
1623       si_images_update_needs_color_decompress_mask(&sctx->images[i]);
1624       si_update_shader_needs_decompress_mask(sctx, i);
1625    }
1626 
1627    si_resident_handles_update_needs_color_decompress(sctx);
1628 }
1629 
1630 /* BUFFER DISCARD/INVALIDATION */
1631 
1632 /* Reset descriptors of buffer resources after \p buf has been invalidated.
1633  * If buf == NULL, reset all descriptors.
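 * Returns true if at least one descriptor was updated.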
1634  */
1635 static bool si_reset_buffer_resources(struct si_context *sctx, struct si_buffer_resources *buffers,
1636                                       unsigned descriptors_idx, uint64_t slot_mask,
1637                                       struct pipe_resource *buf, unsigned priority)
1638 {
1639    struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
1640    bool noop = true;
1641    uint64_t mask = buffers->enabled_mask & slot_mask;
1642 
1643    while (mask) {
1644       unsigned i = u_bit_scan64(&mask);
1645       struct pipe_resource *buffer = buffers->buffers[i];
1646 
1647       if (buffer && (!buf || buffer == buf)) {
1648          si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i], descs->list + i * 4);
1649          sctx->descriptors_dirty |= 1u << descriptors_idx;
1650 
1651          radeon_add_to_gfx_buffer_list_check_mem(
1652             sctx, si_resource(buffer),
1653             (buffers->writable_mask & (1llu << i) ? RADEON_USAGE_READWRITE : RADEON_USAGE_READ) |
1654             priority, true);
1655          noop = false;
1656       }
1657    }
1658    return !noop;
1659 }
1660 
1661 /* Update all buffer bindings where the buffer is bound, including
1662  * all resource descriptors. This is invalidate_buffer without
1663  * the invalidation.
1664  *
1665  * If buf == NULL, update all buffer bindings.
1666  */
1667 void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf)
1668 {
1669    struct si_resource *buffer = si_resource(buf);
1670    unsigned i;
1671    unsigned num_elems = sctx->num_vertex_elements;
1672 
1673    /* We changed the buffer, now we need to bind it where the old one
1674     * was bound. This consists of 2 things:
1675     *   1) Updating the resource descriptor and dirtying it.
1676     *   2) Adding a relocation to the CS, so that it's usable.
1677     */
1678 
1679    /* Vertex buffers. */
1680    if (!buffer) {
1681       sctx->vertex_buffers_dirty = num_elems > 0;
1682    } else if (buffer->bind_history & SI_BIND_VERTEX_BUFFER) {
1683       for (i = 0; i < num_elems; i++) {
1684          int vb = sctx->vertex_elements->vertex_buffer_index[i];
1685 
1686          if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1687             continue;
1688          if (!sctx->vertex_buffer[vb].buffer.resource)
1689             continue;
1690 
1691          if (sctx->vertex_buffer[vb].buffer.resource == buf) {
1692             sctx->vertex_buffers_dirty = num_elems > 0;
1693             break;
1694          }
1695       }
1696    }
1697 
1698    /* Streamout buffers. (other internal buffers can't be invalidated) */
1699    if (!buffer || buffer->bind_history & SI_BIND_STREAMOUT_BUFFER) {
1700       for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1701          struct si_buffer_resources *buffers = &sctx->internal_bindings;
1702          struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_INTERNAL];
1703          struct pipe_resource *buffer = buffers->buffers[i];
1704 
1705          if (!buffer || (buf && buffer != buf))
1706             continue;
1707 
1708          si_set_buf_desc_address(si_resource(buffer), buffers->offsets[i], descs->list + i * 4);
1709          sctx->descriptors_dirty |= 1u << SI_DESCS_INTERNAL;
1710 
1711          radeon_add_to_gfx_buffer_list_check_mem(sctx, si_resource(buffer), RADEON_USAGE_WRITE |
1712                                                  RADEON_PRIO_SHADER_RW_BUFFER, true);
1713 
1714          /* Update the streamout state. */
1715          if (sctx->streamout.begin_emitted)
1716             si_emit_streamout_end(sctx);
1717          sctx->streamout.append_bitmask = sctx->streamout.enabled_mask;
1718          si_streamout_buffers_dirty(sctx);
1719       }
1720    }
1721 
1722    /* Constant and shader buffers. */
1723    if (!buffer || buffer->bind_history & SI_BIND_CONSTANT_BUFFER_ALL) {
1724       unsigned mask = buffer ? (buffer->bind_history & SI_BIND_CONSTANT_BUFFER_ALL) >>
1725                                SI_BIND_CONSTANT_BUFFER_SHIFT : BITFIELD_MASK(SI_NUM_SHADERS);
1726       u_foreach_bit(shader, mask) {
1727          si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1728                                    si_const_and_shader_buffer_descriptors_idx(shader),
1729                                    u_bit_consecutive64(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
1730                                    buf, sctx->const_and_shader_buffers[shader].priority_constbuf);
1731       }
1732    }
1733 
1734    if (!buffer || buffer->bind_history & SI_BIND_SHADER_BUFFER_ALL) {
1735       unsigned mask = buffer ? (buffer->bind_history & SI_BIND_SHADER_BUFFER_ALL) >>
1736                                SI_BIND_SHADER_BUFFER_SHIFT : BITFIELD_MASK(SI_NUM_SHADERS);
1737       u_foreach_bit(shader, mask) {
1738          if (si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
1739                                        si_const_and_shader_buffer_descriptors_idx(shader),
1740                                        u_bit_consecutive64(0, SI_NUM_SHADER_BUFFERS), buf,
1741                                        sctx->const_and_shader_buffers[shader].priority) &&
1742              shader == PIPE_SHADER_COMPUTE) {
1743             sctx->compute_shaderbuf_sgprs_dirty = true;
1744          }
1745       }
1746    }
1747 
1748    if (!buffer || buffer->bind_history & SI_BIND_SAMPLER_BUFFER_ALL) {
1749       unsigned mask = buffer ? (buffer->bind_history & SI_BIND_SAMPLER_BUFFER_ALL) >>
1750                                SI_BIND_SAMPLER_BUFFER_SHIFT : BITFIELD_MASK(SI_NUM_SHADERS);
1751       /* Texture buffers - update bindings. */
1752       u_foreach_bit(shader, mask) {
1753          struct si_samplers *samplers = &sctx->samplers[shader];
1754          struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
1755          unsigned mask = samplers->enabled_mask;
1756 
1757          while (mask) {
1758             unsigned i = u_bit_scan(&mask);
1759             struct pipe_resource *buffer = samplers->views[i]->texture;
1760 
1761             if (buffer && buffer->target == PIPE_BUFFER && (!buf || buffer == buf)) {
1762                unsigned desc_slot = si_get_sampler_slot(i);
1763 
1764                si_set_buf_desc_address(si_resource(buffer), samplers->views[i]->u.buf.offset,
1765                                        descs->list + desc_slot * 16 + 4);
1766                sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
1767 
1768                radeon_add_to_gfx_buffer_list_check_mem(sctx, si_resource(buffer), RADEON_USAGE_READ |
1769                                                        RADEON_PRIO_SAMPLER_BUFFER, true);
1770             }
1771          }
1772       }
1773    }
1774 
1775    /* Shader images */
1776    if (!buffer || buffer->bind_history & SI_BIND_IMAGE_BUFFER_ALL) {
1777       unsigned mask = buffer ? (buffer->bind_history & SI_BIND_IMAGE_BUFFER_ALL) >>
1778                                SI_BIND_IMAGE_BUFFER_SHIFT : BITFIELD_MASK(SI_NUM_SHADERS);
1779       u_foreach_bit(shader, mask) {
1780          struct si_images *images = &sctx->images[shader];
1781          struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
1782          unsigned mask = images->enabled_mask;
1783 
1784          while (mask) {
1785             unsigned i = u_bit_scan(&mask);
1786             struct pipe_resource *buffer = images->views[i].resource;
1787 
1788             if (buffer && buffer->target == PIPE_BUFFER && (!buf || buffer == buf)) {
1789                unsigned desc_slot = si_get_image_slot(i);
1790 
1791                if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1792                   si_mark_image_range_valid(&images->views[i]);
1793 
1794                si_set_buf_desc_address(si_resource(buffer), images->views[i].u.buf.offset,
1795                                        descs->list + desc_slot * 8 + 4);
1796                sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
1797 
1798                radeon_add_to_gfx_buffer_list_check_mem(sctx, si_resource(buffer),
1799                                                        RADEON_USAGE_READWRITE |
1800                                                        RADEON_PRIO_SAMPLER_BUFFER, true);
1801 
1802                if (shader == PIPE_SHADER_COMPUTE)
1803                   sctx->compute_image_sgprs_dirty = true;
1804             }
1805          }
1806       }
1807    }
1808 
1809    /* Bindless texture handles */
1810    if (!buffer || buffer->texture_handle_allocated) {
1811       struct si_descriptors *descs = &sctx->bindless_descriptors;
1812 
1813       util_dynarray_foreach (&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle) {
1814          struct pipe_sampler_view *view = (*tex_handle)->view;
1815          unsigned desc_slot = (*tex_handle)->desc_slot;
1816          struct pipe_resource *buffer = view->texture;
1817 
1818          if (buffer && buffer->target == PIPE_BUFFER && (!buf || buffer == buf)) {
1819             si_set_buf_desc_address(si_resource(buffer), view->u.buf.offset,
1820                                     descs->list + desc_slot * 16 + 4);
1821 
1822             (*tex_handle)->desc_dirty = true;
1823             sctx->bindless_descriptors_dirty = true;
1824 
1825             radeon_add_to_gfx_buffer_list_check_mem(sctx, si_resource(buffer), RADEON_USAGE_READ |
1826                                                     RADEON_PRIO_SAMPLER_BUFFER, true);
1827          }
1828       }
1829    }
1830 
1831    /* Bindless image handles */
1832    if (!buffer || buffer->image_handle_allocated) {
1833       struct si_descriptors *descs = &sctx->bindless_descriptors;
1834 
1835       util_dynarray_foreach (&sctx->resident_img_handles, struct si_image_handle *, img_handle) {
1836          struct pipe_image_view *view = &(*img_handle)->view;
1837          unsigned desc_slot = (*img_handle)->desc_slot;
1838          struct pipe_resource *buffer = view->resource;
1839 
1840          if (buffer && buffer->target == PIPE_BUFFER && (!buf || buffer == buf)) {
1841             if (view->access & PIPE_IMAGE_ACCESS_WRITE)
1842                si_mark_image_range_valid(view);
1843 
1844             si_set_buf_desc_address(si_resource(buffer), view->u.buf.offset,
1845                                     descs->list + desc_slot * 16 + 4);
1846 
1847             (*img_handle)->desc_dirty = true;
1848             sctx->bindless_descriptors_dirty = true;
1849 
1850             radeon_add_to_gfx_buffer_list_check_mem(
1851                sctx, si_resource(buffer), RADEON_USAGE_READWRITE | RADEON_PRIO_SAMPLER_BUFFER, true);
1852          }
1853       }
1854    }
1855 
1856    if (buffer) {
1857       /* Do the same for other contexts. They will invoke this function
1858        * with buffer == NULL.
1859        */
1860       unsigned new_counter = p_atomic_inc_return(&sctx->screen->dirty_buf_counter);
1861 
1862       /* Skip the update for the current context, because we have already updated
1863        * the buffer bindings.
1864        */
1865       if (new_counter == sctx->last_dirty_buf_counter + 1)
1866          sctx->last_dirty_buf_counter = new_counter;
1867    }
1868 }
1869 
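/* Write one bindless descriptor from the CPU copy directly into the
 * descriptor buffer using CP WRITE_DATA (through TC L2).
 */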
1870 static void si_upload_bindless_descriptor(struct si_context *sctx, unsigned desc_slot,
1871                                           unsigned num_dwords)
1872 {
1873    struct si_descriptors *desc = &sctx->bindless_descriptors;
1874    unsigned desc_slot_offset = desc_slot * 16;
1875    uint32_t *data;
1876    uint64_t va;
1877 
1878    data = desc->list + desc_slot_offset;
1879    va = desc->gpu_address + desc_slot_offset * 4;
1880 
1881    si_cp_write_data(sctx, desc->buffer, va - desc->buffer->gpu_address, num_dwords * 4, V_370_TC_L2,
1882                     V_370_ME, data);
1883 }
1884 
1885 static void si_upload_bindless_descriptors(struct si_context *sctx)
1886 {
1887    if (!sctx->bindless_descriptors_dirty)
1888       return;
1889 
1890    /* Wait for graphics/compute to be idle before updating the resident
1891     * descriptors directly in memory, in case the GPU is using them.
1892     */
1893    sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
1894    sctx->emit_cache_flush(sctx, &sctx->gfx_cs);
1895 
1896    util_dynarray_foreach (&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle) {
1897       unsigned desc_slot = (*tex_handle)->desc_slot;
1898 
1899       if (!(*tex_handle)->desc_dirty)
1900          continue;
1901 
1902       si_upload_bindless_descriptor(sctx, desc_slot, 16);
1903       (*tex_handle)->desc_dirty = false;
1904    }
1905 
1906    util_dynarray_foreach (&sctx->resident_img_handles, struct si_image_handle *, img_handle) {
1907       unsigned desc_slot = (*img_handle)->desc_slot;
1908 
1909       if (!(*img_handle)->desc_dirty)
1910          continue;
1911 
1912       si_upload_bindless_descriptor(sctx, desc_slot, 8);
1913       (*img_handle)->desc_dirty = false;
1914    }
1915 
1916    /* Invalidate scalar L0 because the cache doesn't know that L2 changed. */
1917    sctx->flags |= SI_CONTEXT_INV_SCACHE;
1918    sctx->bindless_descriptors_dirty = false;
1919 }
1920 
1921 /* Update mutable image descriptor fields of all resident textures. */
1922 static void si_update_bindless_texture_descriptor(struct si_context *sctx,
1923                                                   struct si_texture_handle *tex_handle)
1924 {
1925    struct si_sampler_view *sview = (struct si_sampler_view *)tex_handle->view;
1926    struct si_descriptors *desc = &sctx->bindless_descriptors;
1927    unsigned desc_slot_offset = tex_handle->desc_slot * 16;
1928    uint32_t desc_list[16];
1929 
1930    if (sview->base.texture->target == PIPE_BUFFER)
1931       return;
1932 
1933    memcpy(desc_list, desc->list + desc_slot_offset, sizeof(desc_list));
1934    si_set_sampler_view_desc(sctx, sview, &tex_handle->sstate, desc->list + desc_slot_offset);
1935 
1936    if (memcmp(desc_list, desc->list + desc_slot_offset, sizeof(desc_list))) {
1937       tex_handle->desc_dirty = true;
1938       sctx->bindless_descriptors_dirty = true;
1939    }
1940 }
1941 
1942 static void si_update_bindless_image_descriptor(struct si_context *sctx,
1943                                                 struct si_image_handle *img_handle)
1944 {
1945    struct si_descriptors *desc = &sctx->bindless_descriptors;
1946    unsigned desc_slot_offset = img_handle->desc_slot * 16;
1947    struct pipe_image_view *view = &img_handle->view;
1948    struct pipe_resource *res = view->resource;
1949    uint32_t image_desc[16];
1950    unsigned desc_size = (res->nr_samples >= 2 ? 16 : 8) * 4;
1951 
1952    if (res->target == PIPE_BUFFER)
1953       return;
1954 
1955    memcpy(image_desc, desc->list + desc_slot_offset, desc_size);
1956    si_set_shader_image_desc(sctx, view, true, desc->list + desc_slot_offset,
1957                             desc->list + desc_slot_offset + 8);
1958 
1959    if (memcmp(image_desc, desc->list + desc_slot_offset, desc_size)) {
1960       img_handle->desc_dirty = true;
1961       sctx->bindless_descriptors_dirty = true;
1962    }
1963 }
1964 
1965 static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
1966 {
1967    util_dynarray_foreach (&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle) {
1968       si_update_bindless_texture_descriptor(sctx, *tex_handle);
1969    }
1970 
1971    util_dynarray_foreach (&sctx->resident_img_handles, struct si_image_handle *, img_handle) {
1972       si_update_bindless_image_descriptor(sctx, *img_handle);
1973    }
1974 
1975    si_upload_bindless_descriptors(sctx);
1976 }
1977 
1978 /* Update mutable image descriptor fields of all bound textures. */
1979 void si_update_all_texture_descriptors(struct si_context *sctx)
1980 {
1981    unsigned shader;
1982 
1983    for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1984       struct si_samplers *samplers = &sctx->samplers[shader];
1985       struct si_images *images = &sctx->images[shader];
1986       unsigned mask;
1987 
1988       /* Images. */
1989       mask = images->enabled_mask;
1990       while (mask) {
1991          unsigned i = u_bit_scan(&mask);
1992          struct pipe_image_view *view = &images->views[i];
1993 
1994          if (!view->resource || view->resource->target == PIPE_BUFFER)
1995             continue;
1996 
1997          si_set_shader_image(sctx, shader, i, view, true);
1998       }
1999 
2000       /* Sampler views. */
2001       mask = samplers->enabled_mask;
2002       while (mask) {
2003          unsigned i = u_bit_scan(&mask);
2004          struct pipe_sampler_view *view = samplers->views[i];
2005 
2006          if (!view || !view->texture || view->texture->target == PIPE_BUFFER)
2007             continue;
2008 
2009          si_set_sampler_views(sctx, shader, i, 1, 0, false, &samplers->views[i], true);
2010       }
2011 
2012       si_update_shader_needs_decompress_mask(sctx, shader);
2013    }
2014 
2015    si_update_all_resident_texture_descriptors(sctx);
2016    si_update_ps_colorbuf0_slot(sctx);
2017 }
2018 
2019 /* SHADER USER DATA */
2020 
2021 static void si_mark_shader_pointers_dirty(struct si_context *sctx, unsigned shader)
2022 {
2023    sctx->shader_pointers_dirty |=
2024       u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS, SI_NUM_SHADER_DESCS);
2025 
2026    if (shader == PIPE_SHADER_VERTEX) {
2027       unsigned num_vbos_in_user_sgprs = si_num_vbos_in_user_sgprs(sctx->screen);
2028 
2029       sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL &&
2030                                           sctx->num_vertex_elements >
2031                                           num_vbos_in_user_sgprs;
2032       sctx->vertex_buffer_user_sgprs_dirty =
2033          sctx->num_vertex_elements > 0 && num_vbos_in_user_sgprs;
2034    }
2035 
2036    si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2037 }
2038 
2039 void si_shader_pointers_mark_dirty(struct si_context *sctx)
2040 {
2041    unsigned num_vbos_in_user_sgprs = si_num_vbos_in_user_sgprs(sctx->screen);
2042 
2043    sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
2044    sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL &&
2045                                        sctx->num_vertex_elements >
2046                                        num_vbos_in_user_sgprs;
2047    sctx->vertex_buffer_user_sgprs_dirty =
2048       sctx->num_vertex_elements > 0 && num_vbos_in_user_sgprs;
2049    si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2050    sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2051    sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
2052    sctx->compute_shaderbuf_sgprs_dirty = true;
2053    sctx->compute_image_sgprs_dirty = true;
2054 }
2055 
2056 /* Set a base register address for user data constants in the given shader.
2057  * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
2058  */
2059 static void si_set_user_data_base(struct si_context *sctx, unsigned shader, uint32_t new_base)
2060 {
2061    uint32_t *base = &sctx->shader_pointers.sh_base[shader];
2062 
2063    if (*base != new_base) {
2064       *base = new_base;
2065 
2066       if (new_base)
2067          si_mark_shader_pointers_dirty(sctx, shader);
2068 
2069       /* Any change in enabled shader stages requires re-emitting
2070        * the VS state SGPR, because it contains the clamp_vertex_color
2071        * state, which can be done in VS, TES, and GS.
2072        */
2073       sctx->last_vs_state = ~0;
2074    }
2075 }
2076 
2077 /* This must be called when these are changed between enabled and disabled
2078  * - geometry shader
2079  * - tessellation evaluation shader
2080  * - NGG
2081  */
2082 void si_shader_change_notify(struct si_context *sctx)
2083 {
2084    si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
2085                          si_get_user_data_base(sctx->chip_class,
2086                                                sctx->shader.tes.cso ? TESS_ON : TESS_OFF,
2087                                                sctx->shader.gs.cso ? GS_ON : GS_OFF,
2088                                                sctx->ngg ? NGG_ON : NGG_OFF,
2089                                                PIPE_SHADER_VERTEX));
2090 
2091    si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
2092                          si_get_user_data_base(sctx->chip_class,
2093                                                sctx->shader.tes.cso ? TESS_ON : TESS_OFF,
2094                                                sctx->shader.gs.cso ? GS_ON : GS_OFF,
2095                                                sctx->ngg ? NGG_ON : NGG_OFF,
2096                                                PIPE_SHADER_TESS_EVAL));
2097 
2098    /* Update as_* flags in shader keys. Ignore disabled shader stages.
2099     *   as_ls = VS before TCS
2100     *   as_es = VS before GS or TES before GS
2101     *   as_ngg = NGG enabled for the last geometry stage.
2102     *            If GS sets as_ngg, the previous stage must set as_ngg too.
2103     */
2104    if (sctx->shader.tes.cso) {
2105       sctx->shader.vs.key.ge.as_ls = 1;
2106       sctx->shader.vs.key.ge.as_es = 0;
2107       sctx->shader.vs.key.ge.as_ngg = 0;
2108 
2109       if (sctx->shader.gs.cso) {
2110          sctx->shader.tes.key.ge.as_es = 1;
2111          sctx->shader.tes.key.ge.as_ngg = sctx->ngg;
2112          sctx->shader.gs.key.ge.as_ngg = sctx->ngg;
2113       } else {
2114          sctx->shader.tes.key.ge.as_es = 0;
2115          sctx->shader.tes.key.ge.as_ngg = sctx->ngg;
2116       }
2117    } else if (sctx->shader.gs.cso) {
2118       sctx->shader.vs.key.ge.as_ls = 0;
2119       sctx->shader.vs.key.ge.as_es = 1;
2120       sctx->shader.vs.key.ge.as_ngg = sctx->ngg;
2121       sctx->shader.gs.key.ge.as_ngg = sctx->ngg;
2122    } else {
2123       sctx->shader.vs.key.ge.as_ls = 0;
2124       sctx->shader.vs.key.ge.as_es = 0;
2125       sctx->shader.vs.key.ge.as_ngg = sctx->ngg;
2126    }
2127 }
2128 
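/* Emit 32-bit descriptor-list pointers for all dirty descriptor sets selected
 * by pointer_mask, grouping consecutive sets into one SET_SH_REG sequence.
 */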
2129 #define si_emit_consecutive_shader_pointers(sctx, pointer_mask, sh_base) do { \
2130    unsigned sh_reg_base = (sh_base); \
2131    if (sh_reg_base) { \
2132       unsigned mask = sctx->shader_pointers_dirty & (pointer_mask); \
2133       \
2134       while (mask) { \
2135          int start, count; \
2136          u_bit_scan_consecutive_range(&mask, &start, &count); \
2137          \
2138          struct si_descriptors *descs = &sctx->descriptors[start]; \
2139          unsigned sh_offset = sh_reg_base + descs->shader_userdata_offset; \
2140          \
2141          radeon_set_sh_reg_seq(sh_offset, count); \
2142          for (int i = 0; i < count; i++) \
2143             radeon_emit_32bit_pointer(sctx->screen, descs[i].gpu_address); \
2144       } \
2145    } \
2146 } while (0)
2147 
2148 static void si_emit_global_shader_pointers(struct si_context *sctx, struct si_descriptors *descs)
2149 {
2150    radeon_begin(&sctx->gfx_cs);
2151 
2152    if (sctx->chip_class >= GFX10) {
2153       radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2154       /* HW VS stage only used in non-NGG mode. */
2155       radeon_emit_one_32bit_pointer(sctx, descs, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2156       radeon_emit_one_32bit_pointer(sctx, descs, R_00B230_SPI_SHADER_USER_DATA_GS_0);
2157       radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_HS_0);
2158       radeon_end();
2159       return;
2160    } else if (sctx->chip_class == GFX9 && sctx->shadowed_regs) {
2161       /* We can't use the COMMON registers with register shadowing. */
2162       radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2163       radeon_emit_one_32bit_pointer(sctx, descs, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2164       radeon_emit_one_32bit_pointer(sctx, descs, R_00B330_SPI_SHADER_USER_DATA_ES_0);
2165       radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_LS_0);
2166       radeon_end();
2167       return;
2168    } else if (sctx->chip_class == GFX9) {
2169       /* Broadcast it to all shader stages. */
2170       radeon_emit_one_32bit_pointer(sctx, descs, R_00B530_SPI_SHADER_USER_DATA_COMMON_0);
2171       radeon_end();
2172       return;
2173    }
2174 
2175    radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
2176    radeon_emit_one_32bit_pointer(sctx, descs, R_00B130_SPI_SHADER_USER_DATA_VS_0);
2177    radeon_emit_one_32bit_pointer(sctx, descs, R_00B330_SPI_SHADER_USER_DATA_ES_0);
2178    radeon_emit_one_32bit_pointer(sctx, descs, R_00B230_SPI_SHADER_USER_DATA_GS_0);
2179    radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_HS_0);
2180    radeon_emit_one_32bit_pointer(sctx, descs, R_00B530_SPI_SHADER_USER_DATA_LS_0);
2181    radeon_end();
2182 }
2183 
2184 void si_emit_graphics_shader_pointers(struct si_context *sctx)
2185 {
2186    uint32_t *sh_base = sctx->shader_pointers.sh_base;
2187 
2188    if (sctx->shader_pointers_dirty & (1 << SI_DESCS_INTERNAL)) {
2189       si_emit_global_shader_pointers(sctx, &sctx->descriptors[SI_DESCS_INTERNAL]);
2190    }
2191 
2192    radeon_begin(&sctx->gfx_cs);
2193    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(VERTEX),
2194                                        sh_base[PIPE_SHADER_VERTEX]);
2195    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_EVAL),
2196                                        sh_base[PIPE_SHADER_TESS_EVAL]);
2197    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(FRAGMENT),
2198                                        sh_base[PIPE_SHADER_FRAGMENT]);
2199    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(TESS_CTRL),
2200                                        sh_base[PIPE_SHADER_TESS_CTRL]);
2201    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(GEOMETRY),
2202                                        sh_base[PIPE_SHADER_GEOMETRY]);
2203    radeon_end();
2204 
2205    sctx->shader_pointers_dirty &= ~u_bit_consecutive(SI_DESCS_INTERNAL, SI_DESCS_FIRST_COMPUTE);
2206 
2207    if (sctx->graphics_bindless_pointer_dirty) {
2208       si_emit_global_shader_pointers(sctx, &sctx->bindless_descriptors);
2209       sctx->graphics_bindless_pointer_dirty = false;
2210    }
2211 }
2212 
2213 void si_emit_compute_shader_pointers(struct si_context *sctx)
2214 {
2215    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
2216    struct si_shader_selector *shader = &sctx->cs_shader_state.program->sel;
2217    unsigned base = R_00B900_COMPUTE_USER_DATA_0;
2218 
2219    radeon_begin(cs);
2220    si_emit_consecutive_shader_pointers(sctx, SI_DESCS_SHADER_MASK(COMPUTE),
2221                                        R_00B900_COMPUTE_USER_DATA_0);
2222    sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(COMPUTE);
2223 
2224    if (sctx->compute_bindless_pointer_dirty) {
2225       radeon_emit_one_32bit_pointer(sctx, &sctx->bindless_descriptors, base);
2226       sctx->compute_bindless_pointer_dirty = false;
2227    }
2228 
2229    /* Set shader buffer descriptors in user SGPRs. */
2230    unsigned num_shaderbufs = shader->cs_num_shaderbufs_in_user_sgprs;
2231    if (num_shaderbufs && sctx->compute_shaderbuf_sgprs_dirty) {
2232       struct si_descriptors *desc = si_const_and_shader_buffer_descriptors(sctx, PIPE_SHADER_COMPUTE);
2233 
2234       radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 +
2235                             shader->cs_shaderbufs_sgpr_index * 4,
2236                             num_shaderbufs * 4);
2237 
2238       for (unsigned i = 0; i < num_shaderbufs; i++)
2239          radeon_emit_array(&desc->list[si_get_shaderbuf_slot(i) * 4], 4);
2240 
2241       sctx->compute_shaderbuf_sgprs_dirty = false;
2242    }
2243 
2244    /* Set image descriptors in user SGPRs. */
2245    unsigned num_images = shader->cs_num_images_in_user_sgprs;
2246    if (num_images && sctx->compute_image_sgprs_dirty) {
2247       struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, PIPE_SHADER_COMPUTE);
2248 
2249       radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 +
2250                             shader->cs_images_sgpr_index * 4,
2251                             shader->cs_images_num_sgprs);
2252 
2253       for (unsigned i = 0; i < num_images; i++) {
2254          unsigned desc_offset = si_get_image_slot(i) * 8;
2255          unsigned num_sgprs = 8;
2256 
2257          /* Image buffers are in desc[4..7]. */
2258          if (shader->info.base.image_buffers & (1 << i)) {
2259             desc_offset += 4;
2260             num_sgprs = 4;
2261          }
2262 
2263          radeon_emit_array(&desc->list[desc_offset], num_sgprs);
2264       }
2265 
2266       sctx->compute_image_sgprs_dirty = false;
2267    }
2268    radeon_end();
2269 }
2270 
2271 /* BINDLESS */
2272 
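/* Bindless handles are descriptor-slot indices into one global descriptor
 * array. Each slot is 16 dwords; slot 0 is reserved because 0 is not a valid
 * handle.
 */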
2273 static void si_init_bindless_descriptors(struct si_context *sctx, struct si_descriptors *desc,
2274                                          short shader_userdata_rel_index, unsigned num_elements)
2275 {
2276    ASSERTED unsigned desc_slot;
2277 
2278    si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
2279    sctx->bindless_descriptors.num_active_slots = num_elements;
2280 
2281    /* The first bindless descriptor is stored at slot 1, because 0 is not
2282     * considered to be a valid handle.
2283     */
2284    sctx->num_bindless_descriptors = 1;
2285 
2286    /* Track which bindless slots are used (or not). */
2287    util_idalloc_init(&sctx->bindless_used_slots, num_elements);
2288 
2289    /* Reserve slot 0 because it's an invalid handle for bindless. */
2290    desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2291    assert(desc_slot == 0);
2292 }
2293 
2294 static void si_release_bindless_descriptors(struct si_context *sctx)
2295 {
2296    si_release_descriptors(&sctx->bindless_descriptors);
2297    util_idalloc_fini(&sctx->bindless_used_slots);
2298 }
2299 
2300 static unsigned si_get_first_free_bindless_slot(struct si_context *sctx)
2301 {
2302    struct si_descriptors *desc = &sctx->bindless_descriptors;
2303    unsigned desc_slot;
2304 
2305    desc_slot = util_idalloc_alloc(&sctx->bindless_used_slots);
2306    if (desc_slot >= desc->num_elements) {
2307       /* The array of bindless descriptors is full, resize it. */
2308       unsigned slot_size = desc->element_dw_size * 4;
2309       unsigned new_num_elements = desc->num_elements * 2;
2310 
2311       desc->list =
2312          REALLOC(desc->list, desc->num_elements * slot_size, new_num_elements * slot_size);
2313       desc->num_elements = new_num_elements;
2314       desc->num_active_slots = new_num_elements;
2315    }
2316 
2317    assert(desc_slot);
2318    return desc_slot;
2319 }
2320 
2321 static unsigned si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
2322                                               unsigned size)
2323 {
2324    struct si_descriptors *desc = &sctx->bindless_descriptors;
2325    unsigned desc_slot, desc_slot_offset;
2326 
2327    /* Find a free slot. */
2328    desc_slot = si_get_first_free_bindless_slot(sctx);
2329 
2330    /* For simplicity, sampler and image bindless descriptors use fixed
2331     * 16-dword slots for now. Image descriptors only need 8 dwords, but this
2332     * doesn't really matter because no real apps use image handles.
2333     */
2334    desc_slot_offset = desc_slot * 16;
2335 
2336    /* Copy the descriptor into the array. */
2337    memcpy(desc->list + desc_slot_offset, desc_list, size);
2338 
2339    /* Re-upload the whole array of bindless descriptors into a new buffer.
2340     */
2341    if (!si_upload_descriptors(sctx, desc))
2342       return 0;
2343 
2344    /* Make sure to re-emit the shader pointers for all stages. */
2345    sctx->graphics_bindless_pointer_dirty = true;
2346    sctx->compute_bindless_pointer_dirty = true;
2347    si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
2348 
2349    return desc_slot;
2350 }
2351 
2352 static void si_update_bindless_buffer_descriptor(struct si_context *sctx, unsigned desc_slot,
2353                                                  struct pipe_resource *resource, uint64_t offset,
2354                                                  bool *desc_dirty)
2355 {
2356    struct si_descriptors *desc = &sctx->bindless_descriptors;
2357    struct si_resource *buf = si_resource(resource);
2358    unsigned desc_slot_offset = desc_slot * 16;
2359    uint32_t *desc_list = desc->list + desc_slot_offset + 4;
2360    uint64_t old_desc_va;
2361 
2362    assert(resource->target == PIPE_BUFFER);
2363 
2364    /* Retrieve the old buffer addr from the descriptor. */
2365    old_desc_va = si_desc_extract_buffer_address(desc_list);
2366 
2367    if (old_desc_va != buf->gpu_address + offset) {
2368       /* The buffer has been invalidated when the handle wasn't
2369        * resident; update the descriptor and the dirty flag.
2370        */
2371       si_set_buf_desc_address(buf, offset, &desc_list[0]);
2372 
2373       *desc_dirty = true;
2374    }
2375 }
2376 
2377 static uint64_t si_create_texture_handle(struct pipe_context *ctx, struct pipe_sampler_view *view,
2378                                          const struct pipe_sampler_state *state)
2379 {
2380    struct si_sampler_view *sview = (struct si_sampler_view *)view;
2381    struct si_context *sctx = (struct si_context *)ctx;
2382    struct si_texture_handle *tex_handle;
2383    struct si_sampler_state *sstate;
2384    uint32_t desc_list[16];
2385    uint64_t handle;
2386 
2387    tex_handle = CALLOC_STRUCT(si_texture_handle);
2388    if (!tex_handle)
2389       return 0;
2390 
2391    memset(desc_list, 0, sizeof(desc_list));
2392    si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
2393 
2394    sstate = ctx->create_sampler_state(ctx, state);
2395    if (!sstate) {
2396       FREE(tex_handle);
2397       return 0;
2398    }
2399 
2400    si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
2401    memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
2402    ctx->delete_sampler_state(ctx, sstate);
2403 
2404    tex_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list, sizeof(desc_list));
2405    if (!tex_handle->desc_slot) {
2406       FREE(tex_handle);
2407       return 0;
2408    }
2409 
2410    handle = tex_handle->desc_slot;
2411 
2412    if (!_mesa_hash_table_insert(sctx->tex_handles, (void *)(uintptr_t)handle, tex_handle)) {
2413       FREE(tex_handle);
2414       return 0;
2415    }
2416 
2417    pipe_sampler_view_reference(&tex_handle->view, view);
2418 
2419    si_resource(sview->base.texture)->texture_handle_allocated = true;
2420 
2421    return handle;
2422 }
2423 
2424 static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
2425 {
2426    struct si_context *sctx = (struct si_context *)ctx;
2427    struct si_texture_handle *tex_handle;
2428    struct hash_entry *entry;
2429 
2430    entry = _mesa_hash_table_search(sctx->tex_handles, (void *)(uintptr_t)handle);
2431    if (!entry)
2432       return;
2433 
2434    tex_handle = (struct si_texture_handle *)entry->data;
2435 
2436    /* Allow this descriptor slot to be re-used. */
2437    util_idalloc_free(&sctx->bindless_used_slots, tex_handle->desc_slot);
2438 
2439    pipe_sampler_view_reference(&tex_handle->view, NULL);
2440    _mesa_hash_table_remove(sctx->tex_handles, entry);
2441    FREE(tex_handle);
2442 }
2443 
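/* Make a texture handle resident or non-resident. Resident handles are
 * tracked in per-context lists so that required decompressions and
 * descriptor updates can be applied while the handle is in use.
 */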
2444 static void si_make_texture_handle_resident(struct pipe_context *ctx, uint64_t handle,
2445                                             bool resident)
2446 {
2447    struct si_context *sctx = (struct si_context *)ctx;
2448    struct si_texture_handle *tex_handle;
2449    struct si_sampler_view *sview;
2450    struct hash_entry *entry;
2451 
2452    entry = _mesa_hash_table_search(sctx->tex_handles, (void *)(uintptr_t)handle);
2453    if (!entry)
2454       return;
2455 
2456    tex_handle = (struct si_texture_handle *)entry->data;
2457    sview = (struct si_sampler_view *)tex_handle->view;
2458 
2459    if (resident) {
2460       if (sview->base.texture->target != PIPE_BUFFER) {
2461          struct si_texture *tex = (struct si_texture *)sview->base.texture;
2462 
2463          if (depth_needs_decompression(tex, sview->is_stencil_sampler)) {
2464             util_dynarray_append(&sctx->resident_tex_needs_depth_decompress,
2465                                  struct si_texture_handle *, tex_handle);
2466          }
2467 
2468          if (color_needs_decompression(tex)) {
2469             util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
2470                                  struct si_texture_handle *, tex_handle);
2471          }
2472 
2473          if (vi_dcc_enabled(tex, sview->base.u.tex.first_level) &&
2474              p_atomic_read(&tex->framebuffers_bound))
2475             sctx->need_check_render_feedback = true;
2476 
2477          si_update_bindless_texture_descriptor(sctx, tex_handle);
2478       } else {
2479          si_update_bindless_buffer_descriptor(sctx, tex_handle->desc_slot, sview->base.texture,
2480                                               sview->base.u.buf.offset, &tex_handle->desc_dirty);
2481       }
2482 
2483       /* Re-upload the descriptor if it has been updated while it
2484        * wasn't resident.
2485        */
2486       if (tex_handle->desc_dirty)
2487          sctx->bindless_descriptors_dirty = true;
2488 
2489       /* Add the texture handle to the per-context list. */
2490       util_dynarray_append(&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle);
2491 
2492       /* Add the buffers to the current CS in case si_begin_new_cs()
2493        * is not going to be called.
2494        */
2495       si_sampler_view_add_buffer(sctx, sview->base.texture, RADEON_USAGE_READ,
2496                                  sview->is_stencil_sampler, false);
2497    } else {
2498       /* Remove the texture handle from the per-context list. */
2499       util_dynarray_delete_unordered(&sctx->resident_tex_handles, struct si_texture_handle *,
2500                                      tex_handle);
2501 
2502       if (sview->base.texture->target != PIPE_BUFFER) {
2503          util_dynarray_delete_unordered(&sctx->resident_tex_needs_depth_decompress,
2504                                         struct si_texture_handle *, tex_handle);
2505 
2506          util_dynarray_delete_unordered(&sctx->resident_tex_needs_color_decompress,
2507                                         struct si_texture_handle *, tex_handle);
2508       }
2509    }
2510 }
2511 
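/* Create a bindless image handle. A new bindless descriptor is allocated for
 * the image view and its slot index is returned as the handle. The view is
 * copied into the handle and tracked in the per-context hash table.
 */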
static uint64_t si_create_image_handle(struct pipe_context *ctx, const struct pipe_image_view *view)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_image_handle *img_handle;
   uint32_t desc_list[16];
   uint64_t handle;

   if (!view || !view->resource)
      return 0;

   img_handle = CALLOC_STRUCT(si_image_handle);
   if (!img_handle)
      return 0;

   memset(desc_list, 0, sizeof(desc_list));
   si_init_descriptor_list(&desc_list[0], 8, 2, null_image_descriptor);

   si_set_shader_image_desc(sctx, view, false, &desc_list[0], &desc_list[8]);

   img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list, sizeof(desc_list));
   if (!img_handle->desc_slot) {
      FREE(img_handle);
      return 0;
   }

   handle = img_handle->desc_slot;

   if (!_mesa_hash_table_insert(sctx->img_handles, (void *)(uintptr_t)handle, img_handle)) {
      FREE(img_handle);
      return 0;
   }

   util_copy_image_view(&img_handle->view, view);

   si_resource(view->resource)->image_handle_allocated = true;

   return handle;
}

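/* Destroy a bindless image handle: release the image view and remove it from
 * the per-context hash table.
 */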
static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_image_handle *img_handle;
   struct hash_entry *entry;

   entry = _mesa_hash_table_search(sctx->img_handles, (void *)(uintptr_t)handle);
   if (!entry)
      return;

   img_handle = (struct si_image_handle *)entry->data;

   util_copy_image_view(&img_handle->view, NULL);
   _mesa_hash_table_remove(sctx->img_handles, entry);
   FREE(img_handle);
}

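/* Make an image handle resident or non-resident. When it becomes resident,
 * the image is registered for color decompression if needed, its bindless
 * descriptor is refreshed, and its buffer is added to the current CS with
 * the usage implied by "access". When it becomes non-resident, it's removed
 * from the per-context lists.
 */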
static void si_make_image_handle_resident(struct pipe_context *ctx, uint64_t handle,
                                          unsigned access, bool resident)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_image_handle *img_handle;
   struct pipe_image_view *view;
   struct si_resource *res;
   struct hash_entry *entry;

   entry = _mesa_hash_table_search(sctx->img_handles, (void *)(uintptr_t)handle);
   if (!entry)
      return;

   img_handle = (struct si_image_handle *)entry->data;
   view = &img_handle->view;
   res = si_resource(view->resource);

   if (resident) {
      if (res->b.b.target != PIPE_BUFFER) {
         struct si_texture *tex = (struct si_texture *)res;
         unsigned level = view->u.tex.level;

         if (color_needs_decompression(tex)) {
            util_dynarray_append(&sctx->resident_img_needs_color_decompress,
                                 struct si_image_handle *, img_handle);
         }

         if (vi_dcc_enabled(tex, level) && p_atomic_read(&tex->framebuffers_bound))
            sctx->need_check_render_feedback = true;

         si_update_bindless_image_descriptor(sctx, img_handle);
      } else {
         si_update_bindless_buffer_descriptor(sctx, img_handle->desc_slot, view->resource,
                                              view->u.buf.offset, &img_handle->desc_dirty);
      }

      /* Re-upload the descriptor if it has been updated while it
       * wasn't resident.
       */
      if (img_handle->desc_dirty)
         sctx->bindless_descriptors_dirty = true;

      /* Add the image handle to the per-context list. */
      util_dynarray_append(&sctx->resident_img_handles, struct si_image_handle *, img_handle);

      /* Add the buffers to the current CS in case si_begin_new_cs()
       * is not going to be called.
       */
      si_sampler_view_add_buffer(
         sctx, view->resource,
         (access & PIPE_IMAGE_ACCESS_WRITE) ? RADEON_USAGE_READWRITE : RADEON_USAGE_READ, false,
         false);
   } else {
      /* Remove the image handle from the per-context list. */
      util_dynarray_delete_unordered(&sctx->resident_img_handles, struct si_image_handle *,
                                     img_handle);

      if (res->b.b.target != PIPE_BUFFER) {
         util_dynarray_delete_unordered(&sctx->resident_img_needs_color_decompress,
                                        struct si_image_handle *, img_handle);
      }
   }
}

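/* Add the buffers of all resident texture and image handles to the buffer
 * list of the current CS. Only called when bo_list_add_all_resident_resources
 * is set.
 */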
static void si_resident_buffers_add_all_to_bo_list(struct si_context *sctx)
{
   unsigned num_resident_tex_handles, num_resident_img_handles;

   num_resident_tex_handles = sctx->resident_tex_handles.size / sizeof(struct si_texture_handle *);
   num_resident_img_handles = sctx->resident_img_handles.size / sizeof(struct si_image_handle *);

   /* Add all resident texture handles. */
   util_dynarray_foreach (&sctx->resident_tex_handles, struct si_texture_handle *, tex_handle) {
      struct si_sampler_view *sview = (struct si_sampler_view *)(*tex_handle)->view;

      si_sampler_view_add_buffer(sctx, sview->base.texture, RADEON_USAGE_READ,
                                 sview->is_stencil_sampler, false);
   }

   /* Add all resident image handles. */
   util_dynarray_foreach (&sctx->resident_img_handles, struct si_image_handle *, img_handle) {
      struct pipe_image_view *view = &(*img_handle)->view;

      si_sampler_view_add_buffer(sctx, view->resource, RADEON_USAGE_READWRITE, false, false);
   }

   sctx->num_resident_handles += num_resident_tex_handles + num_resident_img_handles;
   assert(sctx->bo_list_add_all_resident_resources);
   sctx->bo_list_add_all_resident_resources = false;
}

/* INIT/DEINIT/UPLOAD */

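/* Initialize all descriptor lists of a context (per-shader buffers, samplers
 * and images, internal bindings, and the bindless array) and set the
 * descriptor-related pipe_context entry points.
 */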
void si_init_all_descriptors(struct si_context *sctx)
{
   int i;
   unsigned first_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;

   for (i = first_shader; i < SI_NUM_SHADERS; i++) {
      bool is_2nd =
         sctx->chip_class >= GFX9 && (i == PIPE_SHADER_TESS_CTRL || i == PIPE_SHADER_GEOMETRY);
      unsigned num_sampler_slots = SI_NUM_IMAGE_SLOTS / 2 + SI_NUM_SAMPLERS;
      unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
      int rel_dw_offset;
      struct si_descriptors *desc;

      if (is_2nd) {
         if (i == PIPE_SHADER_TESS_CTRL) {
            rel_dw_offset =
               (R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS - R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
         } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
            rel_dw_offset =
               (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS - R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
         } else {
            rel_dw_offset =
               (R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS - R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
         }
      } else {
         rel_dw_offset = SI_SGPR_CONST_AND_SHADER_BUFFERS;
      }
      desc = si_const_and_shader_buffer_descriptors(sctx, i);
      si_init_buffer_resources(sctx, &sctx->const_and_shader_buffers[i], desc, num_buffer_slots,
                               rel_dw_offset, RADEON_PRIO_SHADER_RW_BUFFER,
                               RADEON_PRIO_CONST_BUFFER);
      desc->slot_index_to_bind_directly = si_get_constbuf_slot(0);

      if (is_2nd) {
         if (i == PIPE_SHADER_TESS_CTRL) {
            rel_dw_offset =
               (R_00B40C_SPI_SHADER_USER_DATA_ADDR_HI_HS - R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
         } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
            rel_dw_offset =
               (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS - R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
         } else {
            rel_dw_offset =
               (R_00B20C_SPI_SHADER_USER_DATA_ADDR_HI_GS - R_00B330_SPI_SHADER_USER_DATA_ES_0) / 4;
         }
      } else {
         rel_dw_offset = SI_SGPR_SAMPLERS_AND_IMAGES;
      }

      desc = si_sampler_and_image_descriptors(sctx, i);
      si_init_descriptors(desc, rel_dw_offset, 16, num_sampler_slots);

      int j;
      for (j = 0; j < SI_NUM_IMAGE_SLOTS; j++)
         memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
      for (; j < SI_NUM_IMAGE_SLOTS + SI_NUM_SAMPLERS * 2; j++)
         memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
   }

   si_init_buffer_resources(sctx, &sctx->internal_bindings, &sctx->descriptors[SI_DESCS_INTERNAL],
                            SI_NUM_INTERNAL_BINDINGS, SI_SGPR_INTERNAL_BINDINGS,
                            /* The second priority is used by
                             * const buffers in RW buffer slots. */
                            RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
   sctx->descriptors[SI_DESCS_INTERNAL].num_active_slots = SI_NUM_INTERNAL_BINDINGS;

   /* Initialize an array of 1024 bindless descriptors. When the limit is
    * reached, just make it larger and re-upload the whole array.
    */
   si_init_bindless_descriptors(sctx, &sctx->bindless_descriptors,
                                SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES, 1024);

   sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);

   /* Set pipe_context functions. */
   sctx->b.bind_sampler_states = si_bind_sampler_states;
   sctx->b.set_shader_images = si_set_shader_images;
   sctx->b.set_constant_buffer = si_pipe_set_constant_buffer;
   sctx->b.set_inlinable_constants = si_set_inlinable_constants;
   sctx->b.set_shader_buffers = si_pipe_set_shader_buffers;
   sctx->b.set_sampler_views = si_pipe_set_sampler_views;
   sctx->b.create_texture_handle = si_create_texture_handle;
   sctx->b.delete_texture_handle = si_delete_texture_handle;
   sctx->b.make_texture_handle_resident = si_make_texture_handle_resident;
   sctx->b.create_image_handle = si_create_image_handle;
   sctx->b.delete_image_handle = si_delete_image_handle;
   sctx->b.make_image_handle_resident = si_make_image_handle_resident;

   if (!sctx->has_graphics)
      return;

   sctx->b.set_polygon_stipple = si_set_polygon_stipple;

   /* Shader user data. */
   sctx->atoms.s.shader_pointers.emit = si_emit_graphics_shader_pointers;

   /* Set default and immutable mappings. */
   si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                         si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
                                               sctx->ngg, PIPE_SHADER_VERTEX));
   si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
                         si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
                                               NGG_OFF, PIPE_SHADER_TESS_CTRL));
   si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
                         si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
                                               NGG_OFF, PIPE_SHADER_GEOMETRY));
   si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}

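/* Upload all dirty descriptor lists selected by "mask" and mark the
 * corresponding shader pointers for re-emission. Returns false if uploading
 * any list fails.
 */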
static bool si_upload_shader_descriptors(struct si_context *sctx, unsigned mask)
{
   unsigned dirty = sctx->descriptors_dirty & mask;

   if (dirty) {
      unsigned iter_mask = dirty;

      do {
         if (!si_upload_descriptors(sctx, &sctx->descriptors[u_bit_scan(&iter_mask)]))
            return false;
      } while (iter_mask);

      sctx->descriptors_dirty &= ~dirty;
      sctx->shader_pointers_dirty |= dirty;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
   }

   si_upload_bindless_descriptors(sctx);
   return true;
}

bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
{
   const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
   return si_upload_shader_descriptors(sctx, mask);
}

bool si_upload_compute_shader_descriptors(struct si_context *sctx)
{
   /* This does not update internal bindings because they are not needed for
    * compute shaders and the input buffer uses the same SGPRs anyway.
    */
   const unsigned mask =
      u_bit_consecutive(SI_DESCS_FIRST_COMPUTE, SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
   return si_upload_shader_descriptors(sctx, mask);
}

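/* Release all descriptor lists and the buffer resources, sampler views,
 * image views, and vertex buffers they reference.
 */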
void si_release_all_descriptors(struct si_context *sctx)
{
   int i;

   for (i = 0; i < SI_NUM_SHADERS; i++) {
      si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
                                  si_const_and_shader_buffer_descriptors(sctx, i));
      si_release_sampler_views(&sctx->samplers[i]);
      si_release_image_views(&sctx->images[i]);
   }
   si_release_buffer_resources(&sctx->internal_bindings, &sctx->descriptors[SI_DESCS_INTERNAL]);
   for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
      pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);

   for (i = 0; i < SI_NUM_DESCS; ++i)
      si_release_descriptors(&sctx->descriptors[i]);

   si_resource_reference(&sctx->vb_descriptors_buffer, NULL);
   sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */

   si_release_bindless_descriptors(sctx);
}

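/* Return true if any resource used by the current graphics state (shader
 * buffers, samplers, images, internal bindings, or framebuffer attachments
 * that will be read) is backed by an encrypted buffer.
 */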
bool si_gfx_resources_check_encrypted(struct si_context *sctx)
{
   bool use_encrypted_bo = false;

   for (unsigned i = 0; i < SI_NUM_GRAPHICS_SHADERS && !use_encrypted_bo; i++) {
      struct si_shader_ctx_state *current_shader = &sctx->shaders[i];
      if (!current_shader->cso)
         continue;

      use_encrypted_bo |=
         si_buffer_resources_check_encrypted(sctx, &sctx->const_and_shader_buffers[i]);
      use_encrypted_bo |=
         si_sampler_views_check_encrypted(sctx, &sctx->samplers[i],
                                          current_shader->cso->info.base.textures_used[0]);
      use_encrypted_bo |= si_image_views_check_encrypted(sctx, &sctx->images[i],
                                          u_bit_consecutive(0, current_shader->cso->info.base.num_images));
   }
   use_encrypted_bo |= si_buffer_resources_check_encrypted(sctx, &sctx->internal_bindings);

   struct si_state_blend *blend = sctx->queued.named.blend;
   for (int i = 0; i < sctx->framebuffer.state.nr_cbufs && !use_encrypted_bo; i++) {
      struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
      if (surf && surf->texture) {
         struct si_texture *tex = (struct si_texture *)surf->texture;
         if (!(tex->buffer.flags & RADEON_FLAG_ENCRYPTED))
            continue;

         /* Are we reading from this framebuffer? */
         if (((blend->blend_enable_4bit >> (4 * i)) & 0xf) ||
             vi_dcc_enabled(tex, 0)) {
            use_encrypted_bo = true;
         }
      }
   }

   if (sctx->framebuffer.state.zsbuf) {
      struct si_texture *zs = (struct si_texture *)sctx->framebuffer.state.zsbuf->texture;
      if (zs &&
          (zs->buffer.flags & RADEON_FLAG_ENCRYPTED)) {
         /* TODO: This isn't needed if depth.func is PIPE_FUNC_NEVER or PIPE_FUNC_ALWAYS */
         use_encrypted_bo = true;
      }
   }

#ifndef NDEBUG
   if (use_encrypted_bo) {
      /* Verify that color buffers are encrypted */
      for (int i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
         struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
         if (!surf)
            continue;
         struct si_texture *tex = (struct si_texture *)surf->texture;
         assert(!surf->texture || (tex->buffer.flags & RADEON_FLAG_ENCRYPTED));
      }
      /* Verify that depth/stencil buffer is encrypted */
      if (sctx->framebuffer.state.zsbuf) {
         struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
         struct si_texture *tex = (struct si_texture *)surf->texture;
         assert(!surf->texture || (tex->buffer.flags & RADEON_FLAG_ENCRYPTED));
      }
   }
#endif

   return use_encrypted_bo;
}

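/* Add all resources bound for graphics (shader buffers, sampler views,
 * images, internal bindings, vertex buffers, and resident bindless handles)
 * to the buffer list of the current CS.
 */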
void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx)
{
   for (unsigned i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
      si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
      si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i]);
      si_image_views_begin_new_cs(sctx, &sctx->images[i]);
   }
   si_buffer_resources_begin_new_cs(sctx, &sctx->internal_bindings);
   si_vertex_buffers_begin_new_cs(sctx);

   if (sctx->bo_list_add_all_resident_resources)
      si_resident_buffers_add_all_to_bo_list(sctx);

   assert(sctx->bo_list_add_all_gfx_resources);
   sctx->bo_list_add_all_gfx_resources = false;
}

bool si_compute_resources_check_encrypted(struct si_context *sctx)
{
   unsigned sh = PIPE_SHADER_COMPUTE;

   struct si_shader_info *info = &sctx->cs_shader_state.program->sel.info;

   /* TODO: we should assert that either use_encrypted_bo is false,
    * or all writable buffers are encrypted.
    */
   return si_buffer_resources_check_encrypted(sctx, &sctx->const_and_shader_buffers[sh]) ||
          si_sampler_views_check_encrypted(sctx, &sctx->samplers[sh], info->base.textures_used[0]) ||
          si_image_views_check_encrypted(sctx, &sctx->images[sh], u_bit_consecutive(0, info->base.num_images)) ||
          si_buffer_resources_check_encrypted(sctx, &sctx->internal_bindings);
}

void si_compute_resources_add_all_to_bo_list(struct si_context *sctx)
{
   unsigned sh = PIPE_SHADER_COMPUTE;

   si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[sh]);
   si_sampler_views_begin_new_cs(sctx, &sctx->samplers[sh]);
   si_image_views_begin_new_cs(sctx, &sctx->images[sh]);
   si_buffer_resources_begin_new_cs(sctx, &sctx->internal_bindings);

   if (sctx->bo_list_add_all_resident_resources)
      si_resident_buffers_add_all_to_bo_list(sctx);

   assert(sctx->bo_list_add_all_compute_resources);
   sctx->bo_list_add_all_compute_resources = false;
}

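/* Add all descriptor buffers to the buffer list of the current CS and flag
 * that bound and resident resources must be re-added as well.
 */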
void si_add_all_descriptors_to_bo_list(struct si_context *sctx)
{
   for (unsigned i = 0; i < SI_NUM_DESCS; ++i)
      si_add_descriptors_to_bo_list(sctx, &sctx->descriptors[i]);
   si_add_descriptors_to_bo_list(sctx, &sctx->bindless_descriptors);

   sctx->bo_list_add_all_resident_resources = true;
   sctx->bo_list_add_all_gfx_resources = true;
   sctx->bo_list_add_all_compute_resources = true;
}

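/* Limit a descriptor list to the consecutive range of slots described by
 * new_active_mask, so that only those slots are uploaded. The list is marked
 * dirty if slots outside the previously active range are enabled.
 */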
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx, uint64_t new_active_mask)
{
   struct si_descriptors *desc = &sctx->descriptors[desc_idx];

   /* Ignore no-op updates and updates that disable all slots. */
   if (!new_active_mask ||
       new_active_mask == u_bit_consecutive64(desc->first_active_slot, desc->num_active_slots))
      return;

   int first, count;
   u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
   assert(new_active_mask == 0);

   /* Upload/dump descriptors if slots are being enabled. */
   if (first < desc->first_active_slot ||
       first + count > desc->first_active_slot + desc->num_active_slots)
      sctx->descriptors_dirty |= 1u << desc_idx;

   desc->first_active_slot = first;
   desc->num_active_slots = count;
}

void si_set_active_descriptors_for_shader(struct si_context *sctx, struct si_shader_selector *sel)
{
   if (!sel)
      return;

   si_set_active_descriptors(sctx, sel->const_and_shader_buf_descriptors_index,
                             sel->active_const_and_shader_buffers);
   si_set_active_descriptors(sctx, sel->sampler_and_images_descriptors_index,
                             sel->active_samplers_and_images);
}