/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"

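/* Blitter save-state sets used below: SI_CLEAR is used for framebuffer clears
 * (si_clear), which only need the fragment state saved, while SI_CLEAR_SURFACE
 * is used for single-surface clears (si_clear_render_target,
 * si_clear_depth_stencil), which also rebind and therefore save the framebuffer.
 */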
enum
{
   SI_CLEAR = SI_SAVE_FRAGMENT_STATE,
   SI_CLEAR_SURFACE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE,
};

void si_init_buffer_clear(struct si_clear_info *info,
                          struct pipe_resource *resource, uint64_t offset,
                          uint32_t size, uint32_t clear_value)
{
   info->resource = resource;
   info->offset = offset;
   info->size = size;
   info->clear_value = clear_value;
   info->writemask = 0xffffffff;
   info->is_dcc_msaa = false;
}

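/* Like si_init_buffer_clear, but only the bits set in "writemask" are written;
 * the remaining bits of each dword keep their current value (read-modify-write).
 * Used below to clear only the depth or only the stencil portion of HTILE.
 */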
static void si_init_buffer_clear_rmw(struct si_clear_info *info,
                                     struct pipe_resource *resource, uint64_t offset,
                                     uint32_t size, uint32_t clear_value, uint32_t writemask)
{
   si_init_buffer_clear(info, resource, offset, size, clear_value);
   info->writemask = writemask;
}

void si_execute_clears(struct si_context *sctx, struct si_clear_info *info,
                       unsigned num_clears, unsigned types)
{
   if (!num_clears)
      return;

   /* Flush caches and wait for idle. */
   if (types & (SI_CLEAR_TYPE_CMASK | SI_CLEAR_TYPE_DCC))
      sctx->flags |= si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU);

   if (types & SI_CLEAR_TYPE_HTILE)
      sctx->flags |= si_get_flush_flags(sctx, SI_COHERENCY_DB_META, L2_LRU);

   /* Flush caches in case we use compute. */
   sctx->flags |= SI_CONTEXT_INV_VCACHE;

   /* GFX6-8: CB and DB don't use L2. */
   if (sctx->chip_class <= GFX8)
      sctx->flags |= SI_CONTEXT_INV_L2;

   /* Execute clears. */
   for (unsigned i = 0; i < num_clears; i++) {
      if (info[i].is_dcc_msaa) {
         gfx9_clear_dcc_msaa(sctx, info[i].resource, info[i].clear_value,
                             SI_OP_SKIP_CACHE_INV_BEFORE, SI_COHERENCY_CP);
         continue;
      }

      assert(info[i].size > 0);

      if (info[i].writemask != 0xffffffff) {
         si_compute_clear_buffer_rmw(sctx, info[i].resource, info[i].offset, info[i].size,
                                     info[i].clear_value, info[i].writemask,
                                     SI_OP_SKIP_CACHE_INV_BEFORE, SI_COHERENCY_CP);
      } else {
         /* Compute shaders are much faster on both dGPUs and APUs. Don't use CP DMA. */
         si_clear_buffer(sctx, info[i].resource, info[i].offset, info[i].size,
                         &info[i].clear_value, 4, SI_OP_SKIP_CACHE_INV_BEFORE,
                         SI_COHERENCY_CP, SI_COMPUTE_CLEAR_METHOD);
      }
   }

   /* Wait for idle. */
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

   /* GFX6-8: CB and DB don't use L2. */
   if (sctx->chip_class <= GFX8)
      sctx->flags |= SI_CONTEXT_WB_L2;
}

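/* Lazily allocate a standalone CMASK buffer for single-sample fast color clears.
 * Returns false if CMASK isn't applicable to this texture.
 */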
static bool si_alloc_separate_cmask(struct si_screen *sscreen, struct si_texture *tex)
{
   /* CMASK for MSAA is allocated in advance or always disabled
    * by the "nofmask" option.
    */
   if (tex->cmask_buffer)
      return true;

   if (!tex->surface.cmask_size)
      return false;

   tex->cmask_buffer =
      si_aligned_buffer_create(&sscreen->b, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
                               tex->surface.cmask_size, 1 << tex->surface.cmask_alignment_log2);
   if (tex->cmask_buffer == NULL)
      return false;

   tex->cmask_base_address_reg = tex->cmask_buffer->gpu_address >> 8;
   tex->cb_color_info |= S_028C70_FAST_CLEAR(1);

   p_atomic_inc(&sscreen->compressed_colortex_counter);
   return true;
}

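/* Pack the API clear color into the layout of the CB clear color registers.
 * Returns true if the packed value changed, i.e. the caller must re-emit the
 * framebuffer state so the new clear color registers are programmed.
 */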
static bool si_set_clear_color(struct si_texture *tex, enum pipe_format surface_format,
                               const union pipe_color_union *color)
{
   union util_color uc;

   memset(&uc, 0, sizeof(uc));

   if (tex->surface.bpe == 16) {
      /* DCC fast clear only:
       *   CLEAR_WORD0 = R = G = B
       *   CLEAR_WORD1 = A
       */
      assert(color->ui[0] == color->ui[1] && color->ui[0] == color->ui[2]);
      uc.ui[0] = color->ui[0];
      uc.ui[1] = color->ui[3];
   } else {
      if (tex->swap_rgb_to_bgr)
         surface_format = util_format_rgb_to_bgr(surface_format);

      util_pack_color_union(surface_format, &uc, color);
   }

   if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
      return false;

   memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
   return true;
}

/** Linearize and convert luminance/intensity to red. */
enum pipe_format si_simplify_cb_format(enum pipe_format format)
{
   format = util_format_linear(format);
   format = util_format_luminance_to_red(format);
   return util_format_intensity_to_red(format);
}

bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format)
{
   format = si_simplify_cb_format(format);
   const struct util_format_description *desc = util_format_description(format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      return true; /* same as xxxA; is any value OK here? */

   if (sscreen->info.chip_class >= GFX10 && desc->nr_channels == 1)
      return desc->swizzle[3] == PIPE_SWIZZLE_X;

   return si_translate_colorswap(format, false) <= 1;
}

static bool vi_get_fast_clear_parameters(struct si_screen *sscreen, enum pipe_format base_format,
                                         enum pipe_format surface_format,
                                         const union pipe_color_union *color, uint32_t *clear_value,
                                         bool *eliminate_needed)
{
   /* If we want to clear without needing a fast clear eliminate step, we
    * can set color and alpha independently to 0 or 1 (or 0/max for integer
    * formats).
    */
   bool values[4] = {};      /* whether to clear to 0 or 1 */
   bool color_value = false; /* clear color to 0 or 1 */
   bool alpha_value = false; /* clear alpha to 0 or 1 */
   int alpha_channel;        /* index of the alpha component */
   bool has_color = false;
   bool has_alpha = false;

   const struct util_format_description *desc =
      util_format_description(si_simplify_cb_format(surface_format));

   /* 128-bit fast clear with different R,G,B values is unsupported. */
   if (desc->block.bits == 128 && (color->ui[0] != color->ui[1] || color->ui[0] != color->ui[2]))
      return false;

   *eliminate_needed = true;
   *clear_value = DCC_CLEAR_COLOR_REG;

   if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
      return true; /* need ELIMINATE_FAST_CLEAR */

   bool base_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, base_format);
   bool surf_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, surface_format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      alpha_channel = -1;
   else if (surf_alpha_is_on_msb)
      alpha_channel = desc->nr_channels - 1;
   else
      alpha_channel = 0;

   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] >= PIPE_SWIZZLE_0)
         continue;

      if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
         /* Use the maximum value for clamping the clear color. */
         int max = u_bit_consecutive(0, desc->channel[i].size - 1);

         values[i] = color->i[i] != 0;
         if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else if (desc->channel[i].pure_integer &&
                 desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         /* Use the maximum value for clamping the clear color. */
         unsigned max = u_bit_consecutive(0, desc->channel[i].size);

         values[i] = color->ui[i] != 0U;
         if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else {
         values[i] = color->f[i] != 0.0F;
         if (color->f[i] != 0.0F && color->f[i] != 1.0F)
            return true; /* need ELIMINATE_FAST_CLEAR */
      }

      if (desc->swizzle[i] == alpha_channel) {
         alpha_value = values[i];
         has_alpha = true;
      } else {
         color_value = values[i];
         has_color = true;
      }
   }

   /* If alpha isn't present, make it the same as color, and vice versa. */
   if (!has_alpha)
      alpha_value = color_value;
   else if (!has_color)
      color_value = alpha_value;

   if (color_value != alpha_value && base_alpha_is_on_msb != surf_alpha_is_on_msb)
      return true; /* require ELIMINATE_FAST_CLEAR */

   /* Check if all color values are equal if they are present. */
   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W && desc->swizzle[i] != alpha_channel &&
          values[i] != color_value)
         return true; /* require ELIMINATE_FAST_CLEAR */
   }

   /* This doesn't need ELIMINATE_FAST_CLEAR.
    * On chips predating Raven2, the DCC clear codes and the CB clear
    * color registers must match.
    */
   *eliminate_needed = false;

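   /* These DCC clear codes encode the per-channel 0/1 clear color directly in
    * the DCC metadata, so chips with DCC constant encoding don't need the CB
    * clear color registers (and thus no eliminate pass) for these values.
    */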
   if (color_value) {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_1111;
      else
         *clear_value = DCC_CLEAR_COLOR_1110;
   } else {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_0001;
      else
         *clear_value = DCC_CLEAR_COLOR_0000;
   }
   return true;
}

bool vi_dcc_get_clear_info(struct si_context *sctx, struct si_texture *tex, unsigned level,
                           unsigned clear_value, struct si_clear_info *out)
{
   struct pipe_resource *dcc_buffer = &tex->buffer.b.b;
   uint64_t dcc_offset = tex->surface.meta_offset;
   uint32_t clear_size;

   assert(vi_dcc_enabled(tex, level));

   if (sctx->chip_class >= GFX10) {
      /* 4x and 8x MSAA need a sophisticated compute shader for
       * the clear. */
      if (tex->buffer.b.b.nr_storage_samples >= 4)
         return false;

      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      if (num_layers == 1) {
         /* Clear a specific level. */
         dcc_offset += tex->surface.u.gfx9.meta_levels[level].offset;
         clear_size = tex->surface.u.gfx9.meta_levels[level].size;
      } else if (tex->buffer.b.b.last_level == 0) {
         /* Clear all layers having only 1 level. */
         clear_size = tex->surface.meta_size;
      } else {
         /* Clearing DCC with both multiple levels and multiple layers is not
          * implemented.
          */
         return false;
      }
   } else if (sctx->chip_class == GFX9) {
      /* TODO: Implement DCC fast clear for level 0 of mipmapped textures. Mipmapped
       * DCC has to clear a rectangular area of DCC for level 0 (because the whole miptree
       * is organized in a 2D plane).
       */
      if (tex->buffer.b.b.last_level > 0)
         return false;

      /* 4x and 8x MSAA need to clear only samples 0 and 1 in a compute shader and leave
       * the other samples untouched. (only the first 2 samples are compressed) */
      if (tex->buffer.b.b.nr_storage_samples >= 4) {
         si_init_buffer_clear(out, dcc_buffer, 0, 0, clear_value);
         out->is_dcc_msaa = true;
         return true;
      }

      clear_size = tex->surface.meta_size;
   } else {
      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      /* If this is 0, fast clear isn't possible. (can occur with MSAA) */
      if (!tex->surface.u.legacy.color.dcc_level[level].dcc_fast_clear_size)
         return false;

      /* Layered 4x and 8x MSAA DCC fast clears need to clear
       * dcc_fast_clear_size bytes for each layer. A compute shader
       * would be more efficient than separate per-layer clear operations.
       */
      if (tex->buffer.b.b.nr_storage_samples >= 4 && num_layers > 1)
         return false;

      dcc_offset += tex->surface.u.legacy.color.dcc_level[level].dcc_offset;
      clear_size = tex->surface.u.legacy.color.dcc_level[level].dcc_fast_clear_size;
   }

   si_init_buffer_clear(out, dcc_buffer, dcc_offset, clear_size, clear_value);
   return true;
}

/* Set the same micro tile mode as the destination of the last MSAA resolve.
 * This allows hitting the MSAA resolve fast path, which requires that both
 * src and dst micro tile modes match.
 */
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen, struct si_texture *tex)
{
   if (sscreen->info.chip_class >= GFX10 || tex->buffer.b.is_shared ||
       tex->buffer.b.b.nr_samples <= 1 ||
       tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
      return;

   assert(sscreen->info.chip_class >= GFX9 ||
          tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
   assert(tex->buffer.b.b.last_level == 0);

   if (sscreen->info.chip_class >= GFX9) {
      /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
      assert(tex->surface.u.gfx9.swizzle_mode >= 4);

      /* If you do swizzle_mode % 4, you'll get:
       *   0 = Depth
       *   1 = Standard
       *   2 = Displayable
       *   3 = Rotated
       *
       * Depth-sample order isn't allowed:
       */
      assert(tex->surface.u.gfx9.swizzle_mode % 4 != 0);

      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 2; /* D */
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 1; /* S */
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 3; /* R */
         break;
      default: /* depth */
         assert(!"unexpected micro mode");
         return;
      }
   } else if (sscreen->info.chip_class >= GFX7) {
      /* These magic numbers were copied from addrlib. It doesn't use
       * any definitions for them either. They are all 2D_TILED_THIN1
       * modes with different bpp and micro tile mode.
       */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.legacy.tiling_index[0] = 10;
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.legacy.tiling_index[0] = 14;
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.legacy.tiling_index[0] = 28;
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   } else { /* GFX6 */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 10;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 11;
            break;
         default: /* 4, 8 */
            tex->surface.u.legacy.tiling_index[0] = 12;
            break;
         }
         break;
      case RADEON_MICRO_MODE_STANDARD:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 14;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 15;
            break;
         case 4:
            tex->surface.u.legacy.tiling_index[0] = 16;
            break;
         default: /* 8, 16 */
            tex->surface.u.legacy.tiling_index[0] = 17;
            break;
         }
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   }

   tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;

   p_atomic_inc(&sscreen->dirty_tex_counter);
}

static uint32_t si_get_htile_clear_value(struct si_texture *tex, float depth)
{
   /* Maximum 14-bit UINT value. */
   const uint32_t max_z_value = 0x3FFF;

   /* For clears, Zmask and Smem will always be set to zero. */
   const uint32_t zmask = 0;
   const uint32_t smem = 0;

   /* Convert depthValue to 14-bit zmin/zmax uint values. */
   const uint32_t zmin = lroundf(depth * max_z_value);
   const uint32_t zmax = zmin;
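   /* For example, a depth clear value of 1.0f yields zmin == zmax == 0x3FFF,
    * and 0.0f yields zmin == zmax == 0.
    */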

   if (tex->htile_stencil_disabled) {
      /* Z-only HTILE is laid out as follows:
       * |31     18|17      4|3     0|
       * +---------+---------+-------+
       * |  Max Z  |  Min Z  | ZMask |
       */
      return ((zmax & 0x3FFF) << 18) |
             ((zmin & 0x3FFF) << 4) |
             ((zmask & 0xF) << 0);
   } else {
      /* Z+S HTILE is laid out as follows:
       * |31       12|11 10|9    8|7   6|5   4|3     0|
       * +-----------+-----+------+-----+-----+-------+
       * |  Z Range  |     | SMem | SR1 | SR0 | ZMask |
       *
       * The base value for zRange is either zMax or zMin, depending on ZRANGE_PRECISION.
       * For a fast clear, zMin == zMax == clearValue. This means that the base will
       * always be the clear value (converted to 14-bit UINT).
       *
       * When abs(zMax-zMin) < 16, the delta is equal to the difference. In the case of
       * fast clears, where zMax == zMin, the delta is always zero.
       */
      const uint32_t delta = 0;
      const uint32_t zrange = (zmax << 6) | delta;

      /* SResults 0 & 1 are set based on the stencil compare state.
       * For fast-clear, the default values of sr0 and sr1 are both 0x3.
       */
      const uint32_t sresults = 0xf;

      return ((zrange & 0xFFFFF) << 12) |
             ((smem & 0x3) << 8) |
             ((sresults & 0xF) << 4) |
             ((zmask & 0xF) << 0);
   }
}

static bool si_can_fast_clear_depth(struct si_texture *zstex, unsigned level, float depth,
                                    unsigned buffers)
{
   /* TC-compatible HTILE only supports depth clears to 0 or 1. */
   return buffers & PIPE_CLEAR_DEPTH &&
          si_htile_enabled(zstex, level, PIPE_MASK_Z) &&
          (!zstex->tc_compatible_htile || depth == 0 || depth == 1);
}

static bool si_can_fast_clear_stencil(struct si_texture *zstex, unsigned level, uint8_t stencil,
                                      unsigned buffers)
{
   /* TC-compatible HTILE only supports stencil clears to 0. */
   return buffers & PIPE_CLEAR_STENCIL &&
          si_htile_enabled(zstex, level, PIPE_MASK_S) &&
          (!zstex->tc_compatible_htile || stencil == 0);
}

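/* Gather all possible metadata fast clears (DCC, CMASK, HTILE) for the bound
 * framebuffer, execute them in one batch, and remove the fully handled clears
 * from *buffers so the caller can fall back to a draw-based clear for whatever
 * is left.
 */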
static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
                          const union pipe_color_union *color, float depth, uint8_t stencil)
{
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   struct si_clear_info info[8 * 2 + 1]; /* MRTs * (CMASK + DCC) + ZS */
   unsigned num_clears = 0;
   unsigned clear_types = 0;
   unsigned num_pixels = fb->width * fb->height;

   /* This function is broken in BE, so just disable this path for now */
#if UTIL_ARCH_BIG_ENDIAN
   return;
#endif

   if (sctx->render_cond)
      return;

   /* Gather information about what to clear. */
   unsigned color_buffer_mask = (*buffers & PIPE_CLEAR_COLOR) >> util_logbase2(PIPE_CLEAR_COLOR0);
   while (color_buffer_mask) {
      unsigned i = u_bit_scan(&color_buffer_mask);

      struct si_texture *tex = (struct si_texture *)fb->cbufs[i]->texture;
      unsigned level = fb->cbufs[i]->u.tex.level;
      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      /* The clear is only allowed if all layers are bound. */
      if (fb->cbufs[i]->u.tex.first_layer != 0 ||
          fb->cbufs[i]->u.tex.last_layer != num_layers - 1) {
         continue;
      }

      /* We can change the micro tile mode before a full clear.
       * This is only used for MSAA textures when clearing all layers.
       */
      si_set_optimal_micro_tile_mode(sctx->screen, tex);

      if (tex->swap_rgb_to_bgr_on_next_clear) {
         assert(!tex->swap_rgb_to_bgr);
         assert(tex->buffer.b.b.nr_samples >= 2);
         tex->swap_rgb_to_bgr = true;
         tex->swap_rgb_to_bgr_on_next_clear = false;

         /* Update all sampler views and images. */
         p_atomic_inc(&sctx->screen->dirty_tex_counter);
      }

      /* Fast clears are only supported on tiled surfaces. */
      if (tex->surface.is_linear) {
         continue;
      }

      if (sctx->chip_class <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
          !sctx->screen->info.htile_cmask_support_1d_tiling)
         continue;

      /* Use a slow clear for small surfaces where the cost of
       * the eliminate pass can be higher than the benefit of fast
       * clear. The closed driver does this, but the numbers may differ.
       *
       * This helps on both dGPUs and APUs, even small APUs like Mullins.
       */
      bool fb_too_small = num_pixels * num_layers <= 512 * 512;
      bool too_small = tex->buffer.b.b.nr_samples <= 1 && fb_too_small;
      bool eliminate_needed = false;
      bool fmask_decompress_needed = false;

      /* Try to clear DCC first, otherwise try CMASK. */
      if (vi_dcc_enabled(tex, level)) {
         uint32_t reset_value;

         if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
            continue;

         if (!vi_get_fast_clear_parameters(sctx->screen, tex->buffer.b.b.format,
                                           fb->cbufs[i]->format, color, &reset_value,
                                           &eliminate_needed))
            continue;

         /* Shared textures can't use fast clear without an explicit flush
          * because the clear color is not exported.
          *
          * Chips without DCC constant encoding must set the clear color registers
          * correctly even if the fast clear eliminate pass is not needed.
          */
         if ((eliminate_needed || !sctx->screen->info.has_dcc_constant_encode) &&
             tex->buffer.b.is_shared &&
             !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
            continue;

         if (eliminate_needed && too_small)
            continue;

         /* We can clear any level, but we only set up the clear value registers for the first
          * level. Therefore, all other levels can be cleared only if the clear value registers
          * are not used, which is only the case with DCC constant encoding and 0/1 clear values.
          */
         if (level > 0 && (eliminate_needed || !sctx->screen->info.has_dcc_constant_encode))
            continue;

         if (tex->buffer.b.b.nr_samples >= 2 && eliminate_needed &&
             !sctx->screen->allow_dcc_msaa_clear_to_reg_for_bpp[util_logbase2(tex->surface.bpe)])
            continue;

         assert(num_clears < ARRAY_SIZE(info));

         if (!vi_dcc_get_clear_info(sctx, tex, level, reset_value, &info[num_clears]))
            continue;

         num_clears++;
         clear_types |= SI_CLEAR_TYPE_DCC;

         si_mark_display_dcc_dirty(sctx, tex);

         /* DCC fast clear with MSAA should clear CMASK to 0xC. */
         if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
            assert(num_clears < ARRAY_SIZE(info));
            si_init_buffer_clear(&info[num_clears++], &tex->cmask_buffer->b.b,
                                 tex->surface.cmask_offset, tex->surface.cmask_size, 0xCCCCCCCC);
            clear_types |= SI_CLEAR_TYPE_CMASK;
            fmask_decompress_needed = true;
         }
      } else {
         if (level > 0)
            continue;

         /* Shared textures can't use fast clear without an explicit flush
          * because the clear color is not exported.
          */
         if (tex->buffer.b.is_shared &&
             !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
            continue;

         if (too_small)
            continue;

         /* 128-bit formats are unsupported. */
         if (tex->surface.bpe > 8) {
            continue;
         }

         /* RB+ doesn't work with CMASK fast clear on Stoney. */
         if (sctx->family == CHIP_STONEY)
            continue;

         /* Disable fast clear if tex is encrypted. */
         if (tex->buffer.flags & RADEON_FLAG_ENCRYPTED)
            continue;

         uint64_t cmask_offset = 0;
         unsigned clear_size = 0;

         if (sctx->chip_class >= GFX10) {
            assert(level == 0);

            /* Clearing CMASK with both multiple levels and multiple layers is not
             * implemented.
             */
            if (num_layers > 1 && tex->buffer.b.b.last_level > 0)
               continue;

            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            if (num_layers == 1) {
               /* Clear level 0. */
               cmask_offset = tex->surface.cmask_offset + tex->surface.u.gfx9.color.cmask_level0.offset;
               clear_size = tex->surface.u.gfx9.color.cmask_level0.size;
            } else if (tex->buffer.b.b.last_level == 0) {
               /* Clear all layers having only 1 level. */
               cmask_offset = tex->surface.cmask_offset;
               clear_size = tex->surface.cmask_size;
            } else {
               assert(0); /* this is prevented above */
            }
         } else if (sctx->chip_class == GFX9) {
            /* TODO: Implement CMASK fast clear for level 0 of mipmapped textures. Mipmapped
             * CMASK has to clear a rectangular area of CMASK for level 0 (because the whole
             * miptree is organized in a 2D plane).
             */
            if (tex->buffer.b.b.last_level > 0)
               continue;

            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            cmask_offset = tex->surface.cmask_offset;
            clear_size = tex->surface.cmask_size;
         } else {
            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            /* GFX6-8: This only covers mipmap level 0. */
            cmask_offset = tex->surface.cmask_offset;
            clear_size = tex->surface.cmask_size;
         }

         /* Do the fast clear. */
         assert(num_clears < ARRAY_SIZE(info));
         si_init_buffer_clear(&info[num_clears++], &tex->cmask_buffer->b.b,
                              cmask_offset, clear_size, 0);
         clear_types |= SI_CLEAR_TYPE_CMASK;
         eliminate_needed = true;
      }

      if ((eliminate_needed || fmask_decompress_needed) &&
          !(tex->dirty_level_mask & (1 << level))) {
         tex->dirty_level_mask |= 1 << level;
         p_atomic_inc(&sctx->screen->compressed_colortex_counter);
      }

      *buffers &= ~(PIPE_CLEAR_COLOR0 << i);

      /* Chips with DCC constant encoding don't need to set the clear
       * color registers for DCC clear values 0 and 1.
       */
      if (sctx->screen->info.has_dcc_constant_encode && !eliminate_needed)
         continue;

      if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
         sctx->framebuffer.dirty_cbufs |= 1 << i;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
      }
   }

   /* Depth/stencil clears. */
   struct pipe_surface *zsbuf = fb->zsbuf;
   struct si_texture *zstex = zsbuf ? (struct si_texture *)zsbuf->texture : NULL;
   unsigned zs_num_layers = zstex ? util_num_layers(&zstex->buffer.b.b, zsbuf->u.tex.level) : 0;

   if (zstex && zsbuf->u.tex.first_layer == 0 &&
       zsbuf->u.tex.last_layer == zs_num_layers - 1 &&
       si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_ZS)) {
      unsigned level = zsbuf->u.tex.level;
      bool update_db_depth_clear = false;
      bool update_db_stencil_clear = false;
      bool fb_too_small = num_pixels * zs_num_layers <= 512 * 512;

      /* Transition from TC-incompatible to TC-compatible HTILE if requested. */
      if (zstex->enable_tc_compatible_htile_next_clear) {
         /* If both depth and stencil are present, they must be cleared together. */
         if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL ||
             (*buffers & PIPE_CLEAR_DEPTH && (!zstex->surface.has_stencil ||
                                              zstex->htile_stencil_disabled))) {
            /* The conversion from TC-incompatible to TC-compatible can only be done in one clear. */
            assert(zstex->buffer.b.b.last_level == 0);
            assert(!zstex->tc_compatible_htile);

            /* Enable TC-compatible HTILE. */
            zstex->enable_tc_compatible_htile_next_clear = false;
            zstex->tc_compatible_htile = true;

            /* Update the framebuffer state to reflect the change. */
            sctx->framebuffer.DB_has_shader_readable_metadata = true;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);

            /* Update all sampler views and shader images in all contexts. */
            p_atomic_inc(&sctx->screen->dirty_tex_counter);

            /* Perform the clear here if possible, else clear to uncompressed. */
            uint32_t clear_value;

            if (zstex->htile_stencil_disabled || !zstex->surface.has_stencil) {
               if (si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
                  /* Z-only clear. */
                  clear_value = si_get_htile_clear_value(zstex, depth);
                  *buffers &= ~PIPE_CLEAR_DEPTH;
                  zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
                  zstex->depth_cleared_level_mask |= BITFIELD_BIT(level);
                  update_db_depth_clear = true;
               }
            } else if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL) {
               if (si_can_fast_clear_depth(zstex, level, depth, *buffers) &&
                   si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
                  /* Combined Z+S clear. */
                  clear_value = si_get_htile_clear_value(zstex, depth);
                  *buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
                  zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
                  zstex->depth_cleared_level_mask |= BITFIELD_BIT(level);
                  zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
                  update_db_depth_clear = true;
                  update_db_stencil_clear = true;
               }
            }

            if (!update_db_depth_clear) {
               /* Clear to uncompressed, so that it doesn't contain values incompatible
                * with the new TC-compatible HTILE setting.
                *
                * 0xfffff30f = uncompressed Z + S
                * 0xfffc000f = uncompressed Z only
                */
               clear_value = !zstex->htile_stencil_disabled ? 0xfffff30f : 0xfffc000f;
            }

            zstex->need_flush_after_depth_decompression = sctx->chip_class == GFX10_3;

            assert(num_clears < ARRAY_SIZE(info));
            si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b,
                                 zstex->surface.meta_offset, zstex->surface.meta_size, clear_value);
            clear_types |= SI_CLEAR_TYPE_HTILE;
         }
      } else if (num_clears || !fb_too_small) {
         /* This is where the HTILE buffer clear is done.
          *
          * If no other clear is scheduled and the framebuffer is too small, it's better to
          * use the draw-based clear, which doesn't have to wait for idle. If some other
          * clear is already scheduled, we'll have to wait anyway, so add the HTILE buffer
          * clear to the batch here. If the framebuffer is large enough, use this codepath
          * too.
          */
         uint64_t htile_offset = zstex->surface.meta_offset;
         unsigned htile_size = 0;

         /* Determine the HTILE subset to clear. */
         if (sctx->chip_class >= GFX10) {
            /* This can only clear a layered texture with 1 level or a mipmap texture
             * with 1 layer. Other cases are unimplemented.
             */
            if (zs_num_layers == 1) {
               /* Clear a specific level. */
               htile_offset += zstex->surface.u.gfx9.meta_levels[level].offset;
               htile_size = zstex->surface.u.gfx9.meta_levels[level].size;
            } else if (zstex->buffer.b.b.last_level == 0) {
               /* Clear all layers having only 1 level. */
               htile_size = zstex->surface.meta_size;
            }
         } else {
            /* This can only clear a layered texture with 1 level. Other cases are
             * unimplemented.
             */
            if (zstex->buffer.b.b.last_level == 0)
               htile_size = zstex->surface.meta_size;
         }

         /* Perform the clear if it's possible. */
         if (zstex->htile_stencil_disabled || !zstex->surface.has_stencil) {
            if (htile_size &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
               /* Z-only clear. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                    htile_size, si_get_htile_clear_value(zstex, depth));
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTH;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               zstex->depth_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
            }
         } else if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL) {
            if (htile_size &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers) &&
                si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
               /* Combined Z+S clear. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                    htile_size, si_get_htile_clear_value(zstex, depth));
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               zstex->depth_cleared_level_mask |= BITFIELD_BIT(level);
               zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
               update_db_stencil_clear = true;
            }
         } else {
            /* Z-only or S-only clear when both Z/S are present using a read-modify-write
             * compute shader.
             *
             * If we get both clears but only one of them can be fast-cleared, we use
             * the draw-based fast clear to do both at the same time.
             */
            const uint32_t htile_depth_writemask = 0xfffffc0f;
            const uint32_t htile_stencil_writemask = 0x000003f0;
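            /* In the Z+S HTILE layout described in si_get_htile_clear_value, the depth
             * writemask selects bits 31:10 and 3:0 (Z Range, the unused bits, and ZMask),
             * while the stencil writemask selects bits 9:4 (SMem, SR1, SR0), so each RMW
             * clear leaves the other component's metadata untouched.
             */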

            if (htile_size &&
                !(*buffers & PIPE_CLEAR_STENCIL) &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
               /* Z-only clear with stencil left intact. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear_rmw(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                        htile_size, si_get_htile_clear_value(zstex, depth),
                                        htile_depth_writemask);
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTH;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               zstex->depth_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
            } else if (htile_size &&
                       !(*buffers & PIPE_CLEAR_DEPTH) &&
                       si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
               /* Stencil-only clear with depth left intact. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear_rmw(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                        htile_size, si_get_htile_clear_value(zstex, depth),
                                        htile_stencil_writemask);
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_STENCIL;
               zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_stencil_clear = true;
            }
         }

         zstex->need_flush_after_depth_decompression = update_db_depth_clear && sctx->chip_class == GFX10_3;

         /* Update DB_DEPTH_CLEAR. */
         if (update_db_depth_clear &&
             zstex->depth_clear_value[level] != (float)depth) {
            zstex->depth_clear_value[level] = depth;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }

         /* Update DB_STENCIL_CLEAR. */
         if (update_db_stencil_clear &&
             zstex->stencil_clear_value[level] != stencil) {
            zstex->stencil_clear_value[level] = stencil;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
      }
   }

   si_execute_clears(sctx, info, num_clears, clear_types);
}

static void si_clear(struct pipe_context *ctx, unsigned buffers,
                     const struct pipe_scissor_state *scissor_state,
                     const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   struct pipe_surface *zsbuf = fb->zsbuf;
   struct si_texture *zstex = zsbuf ? (struct si_texture *)zsbuf->texture : NULL;
   bool needs_db_flush = false;

   /* Unset clear flags for non-existent buffers. */
   for (unsigned i = 0; i < 8; i++) {
      if (i >= fb->nr_cbufs || !fb->cbufs[i])
         buffers &= ~(PIPE_CLEAR_COLOR0 << i);
   }
   if (!zsbuf)
      buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
   else if (!util_format_has_stencil(util_format_description(zsbuf->format)))
      buffers &= ~PIPE_CLEAR_STENCIL;

   si_fast_clear(sctx, &buffers, color, depth, stencil);
   if (!buffers)
      return; /* all buffers have been cleared */

   if (buffers & PIPE_CLEAR_COLOR) {
      /* These buffers cannot use fast clear; make sure to disable expansion. */
      unsigned color_buffer_mask = (buffers & PIPE_CLEAR_COLOR) >> util_logbase2(PIPE_CLEAR_COLOR0);
      while (color_buffer_mask) {
         unsigned i = u_bit_scan(&color_buffer_mask);
         struct si_texture *tex = (struct si_texture *)fb->cbufs[i]->texture;
         if (tex->surface.fmask_size == 0)
            tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
      }
   }

   if (zstex && zsbuf->u.tex.first_layer == 0 &&
       zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
      unsigned level = zsbuf->u.tex.level;

      if (si_can_fast_clear_depth(zstex, level, depth, buffers)) {
         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value.
          */
         if (!(zstex->depth_cleared_level_mask_once & BITFIELD_BIT(level)) ||
             zstex->depth_clear_value[level] != depth) {
            sctx->db_depth_disable_expclear = true;
         }

         if (zstex->depth_clear_value[level] != (float)depth) {
            if ((zstex->depth_clear_value[level] != 0) != (depth != 0)) {
               /* The ZRANGE_PRECISION register of a bound surface will change, so we
                * must flush the DB caches.
                */
               needs_db_flush = true;
            }
            /* Update DB_DEPTH_CLEAR. */
            zstex->depth_clear_value[level] = depth;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_depth_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      if (si_can_fast_clear_stencil(zstex, level, stencil, buffers)) {
         stencil &= 0xff;

         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value.
          */
         if (!(zstex->stencil_cleared_level_mask & BITFIELD_BIT(level)) ||
             zstex->stencil_clear_value[level] != stencil) {
            sctx->db_stencil_disable_expclear = true;
         }

         if (zstex->stencil_clear_value[level] != (uint8_t)stencil) {
            /* Update DB_STENCIL_CLEAR. */
            zstex->stencil_clear_value[level] = stencil;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_stencil_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      if (needs_db_flush)
         sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB;
   }

   if (unlikely(sctx->thread_trace_enabled)) {
      if (buffers & PIPE_CLEAR_COLOR)
         sctx->sqtt_next_event = EventCmdClearColorImage;
      else if (buffers & PIPE_CLEAR_DEPTHSTENCIL)
         sctx->sqtt_next_event = EventCmdClearDepthStencilImage;
   }

   si_blitter_begin(sctx, SI_CLEAR);
   util_blitter_clear(sctx->blitter, fb->width, fb->height, util_framebuffer_get_num_layers(fb),
                      buffers, color, depth, stencil, sctx->framebuffer.nr_samples > 1);
   si_blitter_end(sctx);

   if (sctx->db_depth_clear) {
      sctx->db_depth_clear = false;
      sctx->db_depth_disable_expclear = false;
      zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(zsbuf->u.tex.level);
      zstex->depth_cleared_level_mask |= BITFIELD_BIT(zsbuf->u.tex.level);
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }

   if (sctx->db_stencil_clear) {
      sctx->db_stencil_clear = false;
      sctx->db_stencil_disable_expclear = false;
      zstex->stencil_cleared_level_mask |= BITFIELD_BIT(zsbuf->u.tex.level);
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }
}

static void si_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dst,
                                   const union pipe_color_union *color, unsigned dstx,
                                   unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_texture *sdst = (struct si_texture *)dst->texture;

   if (dst->texture->nr_samples <= 1 &&
       (sctx->chip_class >= GFX10 || !vi_dcc_enabled(sdst, dst->u.tex.level))) {
      si_compute_clear_render_target(ctx, dst, color, dstx, dsty, width, height,
                                     render_condition_enabled);
      return;
   }

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_render_target(sctx->blitter, dst, color, dstx, dsty, width, height);
   si_blitter_end(sctx);
}

static void si_clear_depth_stencil(struct pipe_context *ctx, struct pipe_surface *dst,
                                   unsigned clear_flags, double depth, unsigned stencil,
                                   unsigned dstx, unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil, dstx, dsty,
                                    width, height);
   si_blitter_end(sctx);
}

static void si_clear_texture(struct pipe_context *pipe, struct pipe_resource *tex, unsigned level,
                             const struct pipe_box *box, const void *data)
{
   struct pipe_screen *screen = pipe->screen;
   struct si_texture *stex = (struct si_texture *)tex;
   struct pipe_surface tmpl = {{0}};
   struct pipe_surface *sf;

   tmpl.format = tex->format;
   tmpl.u.tex.first_layer = box->z;
   tmpl.u.tex.last_layer = box->z + box->depth - 1;
   tmpl.u.tex.level = level;
   sf = pipe->create_surface(pipe, tex, &tmpl);
   if (!sf)
      return;

   if (stex->is_depth) {
      unsigned clear;
      float depth;
      uint8_t stencil = 0;

      /* Depth is always present. */
      clear = PIPE_CLEAR_DEPTH;
      util_format_unpack_z_float(tex->format, &depth, data, 1);

      if (stex->surface.has_stencil) {
         clear |= PIPE_CLEAR_STENCIL;
         util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
      }

      si_clear_depth_stencil(pipe, sf, clear, depth, stencil, box->x, box->y, box->width,
                             box->height, false);
   } else {
      union pipe_color_union color;

      util_format_unpack_rgba(tex->format, color.ui, data, 1);

      if (screen->is_format_supported(screen, tex->format, tex->target, 0, 0,
                                      PIPE_BIND_RENDER_TARGET)) {
         si_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height, false);
      } else {
         /* Software fallback - just for R9G9B9E5_FLOAT */
         util_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height);
      }
   }
   pipe_surface_reference(&sf, NULL);
}

void si_init_clear_functions(struct si_context *sctx)
{
   sctx->b.clear_render_target = si_clear_render_target;
   sctx->b.clear_texture = si_clear_texture;

   if (sctx->has_graphics) {
      sctx->b.clear = si_clear;
      sctx->b.clear_depth_stencil = si_clear_depth_stencil;
   }
}