1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * on the rights to use, copy, modify, merge, publish, distribute, sub
9  * license, and/or sell copies of the Software, and to permit persons to whom
10  * the Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include "si_pipe.h"
27 #include "util/format/u_format.h"
28 #include "util/format_srgb.h"
29 #include "util/u_helpers.h"
30 
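/* Compute-based blits: driver-internal compute shaders implement buffer clears
 * and copies, image copies, DCC retiling, MSAA DCC clears, and FMASK expansion
 * without going through the graphics pipeline.
 */
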
31 /* Determine the cache policy. */
32 static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
33                                              uint64_t size)
34 {
35    if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
36                                      coher == SI_COHERENCY_DB_META ||
37                                      coher == SI_COHERENCY_CP)) ||
38        (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
39       return L2_LRU; /* it's faster if L2 doesn't evict anything  */
40 
41    return L2_BYPASS;
42 }
43 
44 unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
45                             enum si_cache_policy cache_policy)
46 {
47    switch (coher) {
48    default:
49    case SI_COHERENCY_NONE:
50    case SI_COHERENCY_CP:
51       return 0;
52    case SI_COHERENCY_SHADER:
53       return SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
54              (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
55    case SI_COHERENCY_CB_META:
56       return SI_CONTEXT_FLUSH_AND_INV_CB;
57    case SI_COHERENCY_DB_META:
58       return SI_CONTEXT_FLUSH_AND_INV_DB;
59    }
60 }
61 
62 static bool si_is_buffer_idle(struct si_context *sctx, struct si_resource *buf,
63                               unsigned usage)
64 {
65    return !si_cs_is_buffer_referenced(sctx, buf->buf, usage) &&
66           sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, usage);
67 }
68 
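/* Relax the requested sync flags for buffer operations: idle buffers need no
 * sync at all, a CS wait is dropped if the buffers were never bound to compute,
 * and a PS wait is downgraded to a GE wait if they were never bound to
 * fragment shaders.
 */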
69 static void si_improve_sync_flags(struct si_context *sctx, struct pipe_resource *dst,
70                                   struct pipe_resource *src, unsigned *flags)
71 {
72    if (dst->target != PIPE_BUFFER || (src && src->target != PIPE_BUFFER))
73       return;
74 
75    if (si_is_buffer_idle(sctx, si_resource(dst), RADEON_USAGE_READWRITE) &&
76        (!src || si_is_buffer_idle(sctx, si_resource(src), RADEON_USAGE_WRITE))) {
77       /* Idle buffers don't have to sync. */
78       *flags &= ~(SI_OP_SYNC_GE_BEFORE | SI_OP_SYNC_PS_BEFORE | SI_OP_SYNC_CS_BEFORE |
79                   SI_OP_SYNC_CPDMA_BEFORE);
80       return;
81    }
82 
83    const unsigned cs_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_COMPUTE) |
84                             SI_BIND_SHADER_BUFFER(PIPE_SHADER_COMPUTE) |
85                             SI_BIND_IMAGE_BUFFER(PIPE_SHADER_COMPUTE) |
86                             SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_COMPUTE);
87 
88    const unsigned ps_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_FRAGMENT) |
89                             SI_BIND_SHADER_BUFFER(PIPE_SHADER_FRAGMENT) |
90                             SI_BIND_IMAGE_BUFFER(PIPE_SHADER_FRAGMENT) |
91                             SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_FRAGMENT);
92 
93    unsigned bind_history = si_resource(dst)->bind_history |
94                            (src ? si_resource(src)->bind_history : 0);
95 
96    /* Clear SI_OP_SYNC_CS_BEFORE if the buffer has never been used with a CS. */
97    if (*flags & SI_OP_SYNC_CS_BEFORE && !(bind_history & cs_mask))
98       *flags &= ~SI_OP_SYNC_CS_BEFORE;
99 
100    /* Clear SI_OP_SYNC_PS_BEFORE if the buffer has never been used with a PS. */
101    if (*flags & SI_OP_SYNC_PS_BEFORE && !(bind_history & ps_mask)) {
102       *flags &= ~SI_OP_SYNC_PS_BEFORE;
103       *flags |= SI_OP_SYNC_GE_BEFORE;
104    }
105 }
106 
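/* Common dispatch path for driver-internal compute shaders: apply the
 * SI_OP_SYNC_* waits and cache invalidations requested in "flags", disable
 * pipeline statistics and (optionally) the render condition, dispatch the
 * grid, and restore the previously bound compute shader afterwards.
 */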
107 void si_launch_grid_internal(struct si_context *sctx, struct pipe_grid_info *info,
108                              void *shader, unsigned flags)
109 {
110    /* Wait for previous shaders to finish. */
111    if (flags & SI_OP_SYNC_GE_BEFORE)
112       sctx->flags |= SI_CONTEXT_VS_PARTIAL_FLUSH;
113 
114    if (flags & SI_OP_SYNC_PS_BEFORE)
115       sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
116 
117    if (flags & SI_OP_SYNC_CS_BEFORE)
118       sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
119 
120    if (!(flags & SI_OP_CS_IMAGE))
121       sctx->flags |= SI_CONTEXT_PFP_SYNC_ME;
122 
123    /* Invalidate L0-L1 caches. */
124    /* sL0 is never invalidated, because src resources don't use it. */
125    if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
126       sctx->flags |= SI_CONTEXT_INV_VCACHE;
127 
128    /* Set settings for driver-internal compute dispatches. */
129    sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
130    sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;
131 
132    if (!(flags & SI_OP_CS_RENDER_COND_ENABLE))
133       sctx->render_cond_enabled = false;
134 
135    /* Skip decompression to prevent infinite recursion. */
136    sctx->blitter_running = true;
137 
138    /* Dispatch compute. */
139    void *saved_cs = sctx->cs_shader_state.program;
140    sctx->b.bind_compute_state(&sctx->b, shader);
141    sctx->b.launch_grid(&sctx->b, info);
142    sctx->b.bind_compute_state(&sctx->b, saved_cs);
143 
144    /* Restore default settings. */
145    sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
146    sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
147    sctx->render_cond_enabled = sctx->render_cond;
148    sctx->blitter_running = false;
149 
150    if (flags & SI_OP_SYNC_AFTER) {
151       sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
152 
153       if (flags & SI_OP_CS_IMAGE) {
154          /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
155          sctx->flags |= sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
156          /* Make sure image stores are visible to all CUs. */
157          sctx->flags |= SI_CONTEXT_INV_VCACHE;
158       } else {
159          /* Make sure buffer stores are visible to all CUs. */
160          sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
161       }
162    }
163 }
164 
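/* Like si_launch_grid_internal, but temporarily binds the given shader buffers
 * to the compute stage and restores the previous bindings afterwards.
 */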
165 void si_launch_grid_internal_ssbos(struct si_context *sctx, struct pipe_grid_info *info,
166                                    void *shader, unsigned flags, enum si_coherency coher,
167                                    unsigned num_buffers, const struct pipe_shader_buffer *buffers,
168                                    unsigned writeable_bitmask)
169 {
170    if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
171       sctx->flags |= si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);
172 
173    /* Save states. */
174    struct pipe_shader_buffer saved_sb[3] = {};
175    assert(num_buffers <= ARRAY_SIZE(saved_sb));
176    si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb);
177 
178    unsigned saved_writable_mask = 0;
179    for (unsigned i = 0; i < num_buffers; i++) {
180       if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
181           (1u << si_get_shaderbuf_slot(i)))
182          saved_writable_mask |= 1 << i;
183    }
184 
185    /* Bind buffers and launch compute. */
186    si_set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, buffers,
187                          writeable_bitmask,
188                          true /* don't update bind_history to prevent unnecessary syncs later */);
189    si_launch_grid_internal(sctx, info, shader, flags);
190 
191    /* Do cache flushing at the end. */
192    if (get_cache_policy(sctx, coher, 0) == L2_BYPASS) {
193       if (flags & SI_OP_SYNC_AFTER)
194          sctx->flags |= SI_CONTEXT_WB_L2;
195    } else {
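      /* L2 keeps the written data; mark the buffers as L2-dirty so that later
       * non-L2 clients (e.g. CP DMA with L2_BYPASS) know to flush L2 first. */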
196       while (writeable_bitmask)
197          si_resource(buffers[u_bit_scan(&writeable_bitmask)].buffer)->TC_L2_dirty = true;
198    }
199 
200    /* Restore states. */
201    sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb,
202                               saved_writable_mask);
203    for (int i = 0; i < num_buffers; i++)
204       pipe_resource_reference(&saved_sb[i].buffer, NULL);
205 }
206 
207 /**
208  * Clear a buffer using read-modify-write with a 32-bit write bitmask.
209  * The clear value has 32 bits.
210  */
211 void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *dst,
212                                  unsigned dst_offset, unsigned size,
213                                  uint32_t clear_value, uint32_t writebitmask,
214                                  unsigned flags, enum si_coherency coher)
215 {
216    assert(dst_offset % 4 == 0);
217    assert(size % 4 == 0);
218 
219    assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
220 
221    /* Use buffer_load_dwordx4 and buffer_store_dwordx4 per thread. */
222    unsigned dwords_per_instruction = 4;
223    unsigned block_size = 64; /* it's always 64x1x1 */
224    unsigned dwords_per_wave = dwords_per_instruction * block_size;
225 
226    unsigned num_dwords = size / 4;
227    unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);
228 
229    struct pipe_grid_info info = {};
230    info.block[0] = MIN2(block_size, num_instructions);
231    info.block[1] = 1;
232    info.block[2] = 1;
233    info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
234    info.grid[1] = 1;
235    info.grid[2] = 1;
236 
237    struct pipe_shader_buffer sb = {};
238    sb.buffer = dst;
239    sb.buffer_offset = dst_offset;
240    sb.buffer_size = size;
241 
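   /* Pack (clear_value & mask) and ~mask; the RMW shader is expected to compute
    * dst = (dst & cs_user_data[1]) | cs_user_data[0], so only the bits selected
    * by writebitmask change. */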
242    sctx->cs_user_data[0] = clear_value & writebitmask;
243    sctx->cs_user_data[1] = ~writebitmask;
244 
245    if (!sctx->cs_clear_buffer_rmw)
246       sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(sctx);
247 
248    si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, flags, coher,
249                                  1, &sb, 0x1);
250 }
251 
252 static void si_compute_clear_12bytes_buffer(struct si_context *sctx, struct pipe_resource *dst,
253                                             unsigned dst_offset, unsigned size,
254                                             const uint32_t *clear_value, unsigned flags,
255                                             enum si_coherency coher)
256 {
257    struct pipe_context *ctx = &sctx->b;
258 
259    assert(dst_offset % 4 == 0);
260    assert(size % 4 == 0);
261    unsigned size_12 = DIV_ROUND_UP(size, 12);
262 
263    struct pipe_shader_buffer sb = {0};
264    sb.buffer = dst;
265    sb.buffer_offset = dst_offset;
266    sb.buffer_size = size;
267 
268    memcpy(sctx->cs_user_data, clear_value, 12);
269 
270    struct pipe_grid_info info = {0};
271 
272    if (!sctx->cs_clear_12bytes_buffer)
273       sctx->cs_clear_12bytes_buffer = si_clear_12bytes_buffer_shader(ctx);
274 
275    info.block[0] = 64;
276    info.last_block[0] = size_12 % 64;
277    info.block[1] = 1;
278    info.block[2] = 1;
279    info.grid[0] = DIV_ROUND_UP(size_12, 64);
280    info.grid[1] = 1;
281    info.grid[2] = 1;
282 
283    si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_12bytes_buffer, flags, coher,
284                                  1, &sb, 0x1);
285 }
286 
287 static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst,
288                                         unsigned dst_offset, struct pipe_resource *src,
289                                         unsigned src_offset, unsigned size,
290                                         const uint32_t *clear_value, unsigned clear_value_size,
291                                         unsigned flags, enum si_coherency coher)
292 {
293    assert(src_offset % 4 == 0);
294    assert(dst_offset % 4 == 0);
295    assert(size % 4 == 0);
296 
297    assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
298    assert(!src || src_offset + size <= src->width0);
299 
300    /* The memory accesses are coalesced, meaning that the 1st instruction writes
301     * the 1st contiguous block of data for the whole wave, the 2nd instruction
302     * writes the 2nd contiguous block of data, etc.
303     */
304    unsigned dwords_per_thread =
305       src ? SI_COMPUTE_COPY_DW_PER_THREAD : SI_COMPUTE_CLEAR_DW_PER_THREAD;
306    unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
307    unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
308    /* The shader declares the block size like this: */
309    unsigned block_size = si_determine_wave_size(sctx->screen, NULL);
310    unsigned dwords_per_wave = dwords_per_thread * block_size;
311 
312    unsigned num_dwords = size / 4;
313    unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);
314 
315    struct pipe_grid_info info = {};
316    info.block[0] = MIN2(block_size, num_instructions);
317    info.block[1] = 1;
318    info.block[2] = 1;
319    info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
320    info.grid[1] = 1;
321    info.grid[2] = 1;
322 
323    struct pipe_shader_buffer sb[2] = {};
324    sb[0].buffer = dst;
325    sb[0].buffer_offset = dst_offset;
326    sb[0].buffer_size = size;
327 
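   /* If the destination isn't kept in L2 (LRU), ask the DMA compute shader for
    * streaming stores, so the clear/copy data doesn't pollute the cache. */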
328    bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;
329 
330    if (src) {
331       sb[1].buffer = src;
332       sb[1].buffer_offset = src_offset;
333       sb[1].buffer_size = size;
334 
335       if (!sctx->cs_copy_buffer) {
336          sctx->cs_copy_buffer = si_create_dma_compute_shader(
337             &sctx->b, SI_COMPUTE_COPY_DW_PER_THREAD, shader_dst_stream_policy, true);
338       }
339 
340       si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_copy_buffer, flags, coher,
341                                     2, sb, 0x1);
342    } else {
343       assert(clear_value_size >= 4 && clear_value_size <= 16 &&
344              util_is_power_of_two_or_zero(clear_value_size));
345 
346       for (unsigned i = 0; i < 4; i++)
347          sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];
348 
349       if (!sctx->cs_clear_buffer) {
350          sctx->cs_clear_buffer = si_create_dma_compute_shader(
351             &sctx->b, SI_COMPUTE_CLEAR_DW_PER_THREAD, shader_dst_stream_policy, false);
352       }
353 
354       si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer, flags, coher,
355                                     1, sb, 0x1);
356    }
357 }
358 
359 void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
360                      uint64_t offset, uint64_t size, uint32_t *clear_value,
361                      uint32_t clear_value_size, unsigned flags,
362                      enum si_coherency coher, enum si_clear_method method)
363 {
364    if (!size)
365       return;
366 
367    si_improve_sync_flags(sctx, dst, NULL, &flags);
368 
369    ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);
370 
371    assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
372    assert(offset % clear_alignment == 0);
373    assert(size % clear_alignment == 0);
374    assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */
375 
376    uint32_t clamped;
377    if (util_lower_clearsize_to_dword(clear_value, (int*)&clear_value_size, &clamped))
378       clear_value = &clamped;
379 
380    if (clear_value_size == 12) {
381       si_compute_clear_12bytes_buffer(sctx, dst, offset, size, clear_value, flags, coher);
382       return;
383    }
384 
385    uint64_t aligned_size = size & ~3ull;
386    if (aligned_size >= 4) {
387       uint64_t compute_min_size;
388 
389       if (sctx->chip_class <= GFX8) {
390          /* CP DMA clears are terribly slow with GTT on GFX6-8, which can always
391           * happen due to BO evictions.
392           */
393          compute_min_size = 0;
394       } else {
395          /* Use a small threshold because CP DMA is slower than compute for larger sizes. */
396          compute_min_size = 4 * 1024;
397       }
398 
399       if (method == SI_AUTO_SELECT_CLEAR_METHOD && (
400            clear_value_size > 4 ||
401            (clear_value_size == 4 && offset % 4 == 0 && size > compute_min_size))) {
402          method = SI_COMPUTE_CLEAR_METHOD;
403       }
404       if (method == SI_COMPUTE_CLEAR_METHOD) {
405          si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0, aligned_size, clear_value,
406                                      clear_value_size, flags, coher);
407       } else {
408          assert(clear_value_size == 4);
409          si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, offset, aligned_size, *clear_value,
410                                 flags, coher, get_cache_policy(sctx, coher, size));
411       }
412 
413       offset += aligned_size;
414       size -= aligned_size;
415    }
416 
417    /* Handle non-dword alignment. */
418    if (size) {
419       assert(dst);
420       assert(dst->target == PIPE_BUFFER);
421       assert(size < 4);
422 
423       sctx->b.buffer_subdata(&sctx->b, dst,
424                              PIPE_MAP_WRITE |
425                              /* TC forbids drivers from invalidating buffers and inferring unsynchronized
426                               * mappings, so suppress those optimizations. */
427                              (sctx->tc ? TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED |
428                                          TC_TRANSFER_MAP_NO_INVALIDATE : 0),
429                              offset, size, clear_value);
430    }
431 }
432 
433 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst, uint64_t offset,
434                             uint64_t size, unsigned value, unsigned flags)
435 {
436    struct si_context *ctx = (struct si_context *)sscreen->aux_context;
437 
438    simple_mtx_lock(&sscreen->aux_context_lock);
439    si_clear_buffer(ctx, dst, offset, size, &value, 4, flags,
440                    SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
441    sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
442    simple_mtx_unlock(&sscreen->aux_context_lock);
443 }
444 
445 static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
446                                  unsigned offset, unsigned size, const void *clear_value,
447                                  int clear_value_size)
448 {
449    si_clear_buffer((struct si_context *)ctx, dst, offset, size, (uint32_t *)clear_value,
450                    clear_value_size, SI_OP_SYNC_BEFORE_AFTER, SI_COHERENCY_SHADER,
451                    SI_AUTO_SELECT_CLEAR_METHOD);
452 }
453 
454 void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
455                     uint64_t dst_offset, uint64_t src_offset, unsigned size, unsigned flags)
456 {
457    if (!size)
458       return;
459 
460    enum si_coherency coher = SI_COHERENCY_SHADER;
461    enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
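   /* Below this size, a CP DMA copy is assumed to be cheaper than a compute
    * dispatch. */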
462    uint64_t compute_min_size = 8 * 1024;
463 
464    si_improve_sync_flags(sctx, dst, src, &flags);
465 
466    /* Only use compute for VRAM copies on dGPUs. */
467    if (sctx->screen->info.has_dedicated_vram && si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
468        si_resource(src)->domains & RADEON_DOMAIN_VRAM && size > compute_min_size &&
469        dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
470       si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset, size, NULL, 0,
471                                   flags, coher);
472    } else {
473       si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
474                             flags, coher, cache_policy);
475    }
476 }
477 
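/* Fill pipe_grid_info from a block size and a total work size, using
 * last_block for the partial block at the end of each dimension.
 */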
478 static void
479 set_work_size(struct pipe_grid_info *info, unsigned block_x, unsigned block_y, unsigned block_z,
480               unsigned work_x, unsigned work_y, unsigned work_z)
481 {
482    info->block[0] = block_x;
483    info->block[1] = block_y;
484    info->block[2] = block_z;
485 
486    unsigned work[3] = {work_x, work_y, work_z};
487    for (int i = 0; i < 3; ++i) {
488       info->last_block[i] = work[i] % info->block[i];
489       info->grid[i] = DIV_ROUND_UP(work[i], info->block[i]);
490    }
491 }
492 
493 void si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,
494                            struct pipe_resource *src, unsigned src_level, unsigned dstx,
495                            unsigned dsty, unsigned dstz, const struct pipe_box *src_box,
496                            bool is_dcc_decompress, unsigned flags)
497 {
498    struct pipe_context *ctx = &sctx->b;
499    struct si_texture *ssrc = (struct si_texture*)src;
500    struct si_texture *sdst = (struct si_texture*)dst;
501    unsigned width = src_box->width;
502    unsigned height = src_box->height;
503    unsigned depth = src_box->depth;
504    enum pipe_format src_format = util_format_linear(src->format);
505    enum pipe_format dst_format = util_format_linear(dst->format);
506    bool is_linear = ssrc->surface.is_linear || sdst->surface.is_linear;
507    bool is_1D = dst->target == PIPE_TEXTURE_1D_ARRAY && src->target == PIPE_TEXTURE_1D_ARRAY;
508 
509    assert(util_format_is_subsampled_422(src_format) == util_format_is_subsampled_422(dst_format));
510 
511    if (!vi_dcc_enabled(ssrc, src_level) &&
512        !vi_dcc_enabled(sdst, dst_level) &&
513        src_format == dst_format &&
514        util_format_is_float(src_format) &&
515        !util_format_is_compressed(src_format)) {
516       /* Interpret as integer values to avoid NaN issues */
517       switch(util_format_get_blocksizebits(src_format)) {
518         case 16:
519           src_format = dst_format = PIPE_FORMAT_R16_UINT;
520           break;
521         case 32:
522           src_format = dst_format = PIPE_FORMAT_R32_UINT;
523           break;
524         case 64:
525           src_format = dst_format = PIPE_FORMAT_R32G32_UINT;
526           break;
527         case 128:
528           src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT;
529           break;
530         default:
531           assert(false);
532       }
533    }
534 
535    if (util_format_is_subsampled_422(src_format)) {
536       src_format = dst_format = PIPE_FORMAT_R32_UINT;
537       /* Interpreting 422 subsampled format (16 bpp) as 32 bpp
538        * should force us to divide src_box->x, dstx and width by 2.
539        * But given that ac_surface allocates this format as 32 bpp
540        * and that surf_size is then modified to pack the values,
541        * we must keep the original values to get the correct results.
542        */
543    }
544 
545    if (width == 0 || height == 0)
546       return;
547 
548    /* The driver doesn't decompress resources automatically here. */
549    si_decompress_subresource(ctx, dst, PIPE_MASK_RGBAZS, dst_level, dstz,
550                              dstz + src_box->depth - 1);
551    si_decompress_subresource(ctx, src, PIPE_MASK_RGBAZS, src_level, src_box->z,
552                              src_box->z + src_box->depth - 1);
553 
554    /* src and dst have the same number of samples. */
555    si_make_CB_shader_coherent(sctx, src->nr_samples, true,
556                               ssrc->surface.u.gfx9.color.dcc.pipe_aligned);
557    if (sctx->chip_class >= GFX10) {
558       /* GFX10+ uses DCC stores so si_make_CB_shader_coherent is required for dst too */
559       si_make_CB_shader_coherent(sctx, dst->nr_samples, true,
560                                  sdst->surface.u.gfx9.color.dcc.pipe_aligned);
561    }
562 
563    struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
564    struct pipe_image_view saved_image[2] = {0};
565    util_copy_image_view(&saved_image[0], &images->views[0]);
566    util_copy_image_view(&saved_image[1], &images->views[1]);
567 
568    struct pipe_image_view image[2] = {0};
569    image[0].resource = src;
570    image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
571    image[0].format = src_format;
572    image[0].u.tex.level = src_level;
573    image[0].u.tex.first_layer = 0;
574    image[0].u.tex.last_layer = src->target == PIPE_TEXTURE_3D ? u_minify(src->depth0, src_level) - 1
575                                                               : (unsigned)(src->array_size - 1);
576    image[1].resource = dst;
577    image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
578    image[1].format = dst_format;
579    image[1].u.tex.level = dst_level;
580    image[1].u.tex.first_layer = 0;
581    image[1].u.tex.last_layer = dst->target == PIPE_TEXTURE_3D ? u_minify(dst->depth0, dst_level) - 1
582                                                               : (unsigned)(dst->array_size - 1);
583 
584    /* SNORM8 blitting has precision issues on some chips. Use the SINT
585     * equivalent instead, which doesn't force DCC decompression.
586     */
587    if (util_format_is_snorm8(dst->format)) {
588       image[0].format = image[1].format = util_format_snorm8_to_sint8(dst->format);
589    }
590 
591    if (is_dcc_decompress)
592       image[1].access |= SI_IMAGE_ACCESS_DCC_OFF;
593    else if (sctx->chip_class >= GFX10)
594       image[1].access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;
595 
596    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, 0, image);
597 
598    struct pipe_grid_info info = {0};
599 
600    if (is_dcc_decompress) {
601       /* The DCC decompression is a normal blit where the load is compressed
602        * and the store is uncompressed. The workgroup size is either equal to
603        * the DCC block size or a multiple thereof. The shader uses a barrier
604        * between loads and stores to safely overwrite each DCC block of pixels.
605        */
606       assert(src == dst);
607       assert(dst->target != PIPE_TEXTURE_1D && dst->target != PIPE_TEXTURE_1D_ARRAY);
608 
609       if (!sctx->cs_dcc_decompress)
610          sctx->cs_dcc_decompress = si_create_dcc_decompress_cs(ctx);
611 
612       unsigned block_x = ssrc->surface.u.gfx9.color.dcc_block_width;
613       unsigned block_y = ssrc->surface.u.gfx9.color.dcc_block_height;
614       unsigned block_z = ssrc->surface.u.gfx9.color.dcc_block_depth;
615 
616       unsigned default_wave_size = si_determine_wave_size(sctx->screen, NULL);
617 
618       /* Make sure the block size is at least the same as wave size. */
619       while (block_x * block_y * block_z < default_wave_size) {
620          block_x *= 2;
621       }
622 
623       set_work_size(&info, block_x, block_y, block_z, src_box->width, src_box->height, src_box->depth);
624 
625       si_launch_grid_internal(sctx, &info, sctx->cs_dcc_decompress, flags | SI_OP_CS_IMAGE);
626    } else {
627       sctx->cs_user_data[0] = src_box->x | (dstx << 16);
628 
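      /* Linear and 1D layouts prefer a 64x1 workgroup; tiled 2D/3D layouts use
       * 8x8 to better match the tiled access pattern. */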
629       int block_x = is_1D || is_linear ? 64 : 8;
630       int block_y = is_1D || is_linear ? 1 : 8;
631       int block_z = 1;
632 
633       if (is_1D) {
634          assert(height == 1); /* height is not used for 1D images */
635          assert(src_box->y == 0 && dsty == 0);
636 
637          sctx->cs_user_data[1] = src_box->z | (dstz << 16);
638 
639          /* We pass array index in 'y' for 1D images. */
640          height = depth;
641          depth = 1;
642       } else {
643          sctx->cs_user_data[1] = src_box->y | (dsty << 16);
644          sctx->cs_user_data[2] = src_box->z | (dstz << 16);
645       }
646 
647       set_work_size(&info, block_x, block_y, block_z, width, height, depth);
648 
649       void **copy_image_cs_ptr = is_1D ? &sctx->cs_copy_image_1D : &sctx->cs_copy_image_2D;
650       if (!*copy_image_cs_ptr)
651          *copy_image_cs_ptr = si_create_copy_image_cs(sctx, is_1D);
652 
653       assert(*copy_image_cs_ptr);
654 
655       si_launch_grid_internal(sctx, &info, *copy_image_cs_ptr, flags | SI_OP_CS_IMAGE);
656    }
657 
658    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, 0, saved_image);
659    for (int i = 0; i < 2; i++)
660       pipe_resource_reference(&saved_image[i].resource, NULL);
661 }
662 
663 void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
664 {
665    /* Set the DCC buffer. */
666    assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
667    assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);
668    assert(tex->surface.display_dcc_offset < tex->surface.meta_offset);
669    assert(tex->buffer.bo_size <= UINT_MAX);
670 
671    struct pipe_shader_buffer sb = {};
672    sb.buffer = &tex->buffer.b.b;
673    sb.buffer_offset = tex->surface.display_dcc_offset;
674    sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;
675 
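   /* The buffer is bound at the displayable DCC. cs_user_data[0] is the offset
    * of the non-displayable DCC relative to it; [1] and [2] pack the pitch and
    * height of the source and destination DCC surfaces. */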
676    sctx->cs_user_data[0] = tex->surface.meta_offset - tex->surface.display_dcc_offset;
677    sctx->cs_user_data[1] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
678                            (tex->surface.u.gfx9.color.dcc_height << 16);
679    sctx->cs_user_data[2] = (tex->surface.u.gfx9.color.display_dcc_pitch_max + 1) |
680                            (tex->surface.u.gfx9.color.display_dcc_height << 16);
681 
682    /* We have only 1 variant per bpp for now, so expect 32 bpp. */
683    assert(tex->surface.bpe == 4);
684 
685    void **shader = &sctx->cs_dcc_retile[tex->surface.u.gfx9.swizzle_mode];
686    if (!*shader)
687       *shader = si_create_dcc_retile_cs(sctx, &tex->surface);
688 
689    /* Dispatch compute. */
690    unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
691    unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
692 
693    struct pipe_grid_info info = {};
694    info.block[0] = 8;
695    info.block[1] = 8;
696    info.block[2] = 1;
697    info.last_block[0] = width % info.block[0];
698    info.last_block[1] = height % info.block[1];
699    info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
700    info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
701    info.grid[2] = 1;
702 
703    si_launch_grid_internal_ssbos(sctx, &info, *shader, SI_OP_SYNC_BEFORE,
704                                  SI_COHERENCY_CB_META, 1, &sb, 0x1);
705 
706    /* Don't flush caches. L2 will be flushed by the kernel fence. */
707 }
708 
709 void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uint32_t clear_value,
710                          unsigned flags, enum si_coherency coher)
711 {
712    struct si_texture *tex = (struct si_texture*)res;
713 
714    /* Set the DCC buffer. */
715    assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
716    assert(tex->buffer.bo_size <= UINT_MAX);
717 
718    struct pipe_shader_buffer sb = {};
719    sb.buffer = &tex->buffer.b.b;
720    sb.buffer_offset = tex->surface.meta_offset;
721    sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;
722 
723    sctx->cs_user_data[0] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
724                            (tex->surface.u.gfx9.color.dcc_height << 16);
725    sctx->cs_user_data[1] = (clear_value & 0xffff) |
726                            ((uint32_t)tex->surface.tile_swizzle << 16);
727 
728    /* These variables identify the shader variant. */
729    unsigned swizzle_mode = tex->surface.u.gfx9.swizzle_mode;
730    unsigned bpe_log2 = util_logbase2(tex->surface.bpe);
731    unsigned log2_samples = util_logbase2(tex->buffer.b.b.nr_samples);
732    bool fragments8 = tex->buffer.b.b.nr_storage_samples == 8;
733    bool is_array = tex->buffer.b.b.array_size > 1;
734    void **shader = &sctx->cs_clear_dcc_msaa[swizzle_mode][bpe_log2][fragments8][log2_samples - 2][is_array];
735 
736    if (!*shader)
737       *shader = gfx9_create_clear_dcc_msaa_cs(sctx, tex);
738 
739    /* Dispatch compute. */
740    unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
741    unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
742    unsigned depth = DIV_ROUND_UP(tex->buffer.b.b.array_size, tex->surface.u.gfx9.color.dcc_block_depth);
743 
744    struct pipe_grid_info info = {};
745    info.block[0] = 8;
746    info.block[1] = 8;
747    info.block[2] = 1;
748    info.last_block[0] = width % info.block[0];
749    info.last_block[1] = height % info.block[1];
750    info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
751    info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
752    info.grid[2] = depth;
753 
754    si_launch_grid_internal_ssbos(sctx, &info, *shader, flags, coher, 1, &sb, 0x1);
755 }
756 
757 /* Expand FMASK to make it identity, so that image stores can ignore it. */
758 void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
759 {
760    struct si_context *sctx = (struct si_context *)ctx;
761    bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
762    unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
763    unsigned log_samples = util_logbase2(tex->nr_samples);
764    assert(tex->nr_samples >= 2);
765 
766    /* EQAA FMASK expansion is unimplemented. */
767    if (tex->nr_samples != tex->nr_storage_samples)
768       return;
769 
770    si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
771                               ((struct si_texture*)tex)->surface.u.gfx9.color.dcc.pipe_aligned);
772 
773    /* Save states. */
774    struct pipe_image_view saved_image = {0};
775    util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);
776 
777    /* Bind the image. */
778    struct pipe_image_view image = {0};
779    image.resource = tex;
780    /* Don't set WRITE so as not to trigger FMASK expansion, causing
781     * an infinite loop. */
782    image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
783    image.format = util_format_linear(tex->format);
784    if (is_array)
785       image.u.tex.last_layer = tex->array_size - 1;
786 
787    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);
788 
789    /* Bind the shader. */
790    void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
791    if (!*shader)
792       *shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array);
793 
794    /* Dispatch compute. */
795    struct pipe_grid_info info = {0};
796    info.block[0] = 8;
797    info.last_block[0] = tex->width0 % 8;
798    info.block[1] = 8;
799    info.last_block[1] = tex->height0 % 8;
800    info.block[2] = 1;
801    info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
802    info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
803    info.grid[2] = is_array ? tex->array_size : 1;
804 
805    si_launch_grid_internal(sctx, &info, *shader, SI_OP_SYNC_BEFORE_AFTER);
806 
807    /* Restore previous states. */
808    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
809    pipe_resource_reference(&saved_image.resource, NULL);
810 
811    /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
812 #define INVALID 0 /* never used */
813    static const uint64_t fmask_expand_values[][4] = {
814       /* samples */
815       /* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)      fragments */
816       {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},      /* 1 */
817       {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},      /* 2 */
818       {INVALID, 0xE4E4E4E4, 0x44443210, 0x4444444444443210}, /* 4 */
819       {INVALID, INVALID, 0x76543210, 0x8888888876543210},    /* 8 */
820    };
821 
822    /* Clear FMASK to identity. */
823    struct si_texture *stex = (struct si_texture *)tex;
824    si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
825                    (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
826                    log_fragments >= 2 && log_samples == 4 ? 8 : 4, SI_OP_SYNC_AFTER,
827                    SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
828 }
829 
830 void si_init_compute_blit_functions(struct si_context *sctx)
831 {
832    sctx->b.clear_buffer = si_pipe_clear_buffer;
833 }
834 
835 /* Clear a region of a color surface to a constant value. */
836 void si_compute_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dstsurf,
837                                     const union pipe_color_union *color, unsigned dstx,
838                                     unsigned dsty, unsigned width, unsigned height,
839                                     bool render_condition_enabled)
840 {
841    struct si_context *sctx = (struct si_context *)ctx;
842    struct si_texture *tex = (struct si_texture*)dstsurf->texture;
843    unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
844    unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};
845 
846    if (width == 0 || height == 0)
847       return;
848 
849    /* The driver doesn't decompress resources automatically here. */
850    si_decompress_subresource(ctx, dstsurf->texture, PIPE_MASK_RGBA, dstsurf->u.tex.level,
851                              dstsurf->u.tex.first_layer, dstsurf->u.tex.last_layer);
852 
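   /* Image stores bypass the CB sRGB conversion, so convert the clear color to
    * sRGB manually. Alpha stays linear. */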
853    if (util_format_is_srgb(dstsurf->format)) {
854       union pipe_color_union color_srgb;
855       for (int i = 0; i < 3; i++)
856          color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
857       color_srgb.f[3] = color->f[3];
858       memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
859    } else {
860       memcpy(data + 4, color->ui, sizeof(color->ui));
861    }
862 
863    si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true,
864                               tex->surface.u.gfx9.color.dcc.pipe_aligned);
865 
866    struct pipe_constant_buffer saved_cb = {};
867    si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
868 
869    struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
870    struct pipe_image_view saved_image = {0};
871    util_copy_image_view(&saved_image, &images->views[0]);
872 
873    struct pipe_constant_buffer cb = {};
874    cb.buffer_size = sizeof(data);
875    cb.user_buffer = data;
876    ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);
877 
878    struct pipe_image_view image = {0};
879    image.resource = dstsurf->texture;
880    image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE | SI_IMAGE_ACCESS_ALLOW_DCC_STORE;
881    image.format = util_format_linear(dstsurf->format);
882    image.u.tex.level = dstsurf->u.tex.level;
883    image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
884    image.u.tex.last_layer = dstsurf->u.tex.last_layer;
885 
886    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);
887 
888    struct pipe_grid_info info = {0};
889    void *shader;
890 
891    if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
892       if (!sctx->cs_clear_render_target)
893          sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
894       shader = sctx->cs_clear_render_target;
895 
896       info.block[0] = 8;
897       info.last_block[0] = width % 8;
898       info.block[1] = 8;
899       info.last_block[1] = height % 8;
900       info.block[2] = 1;
901       info.grid[0] = DIV_ROUND_UP(width, 8);
902       info.grid[1] = DIV_ROUND_UP(height, 8);
903       info.grid[2] = num_layers;
904    } else {
905       if (!sctx->cs_clear_render_target_1d_array)
906          sctx->cs_clear_render_target_1d_array = si_clear_render_target_shader_1d_array(ctx);
907       shader = sctx->cs_clear_render_target_1d_array;
908 
909       info.block[0] = 64;
910       info.last_block[0] = width % 64;
911       info.block[1] = 1;
912       info.block[2] = 1;
913       info.grid[0] = DIV_ROUND_UP(width, 64);
914       info.grid[1] = num_layers;
915       info.grid[2] = 1;
916    }
917 
918    si_launch_grid_internal(sctx, &info, shader, SI_OP_SYNC_BEFORE_AFTER | SI_OP_CS_IMAGE |
919                            (render_condition_enabled ? SI_OP_CS_RENDER_COND_ENABLE : 0));
920 
921    ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
922    ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
923    pipe_resource_reference(&saved_image.resource, NULL);
924 }
925