/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600_query.h"
#include "evergreend.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"
#include "evergreen_compute.h"
#include "util/u_math.h"

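/* Map a radeon_surf tiling mode to the value used by the ARRAY_MODE
 * register fields; unknown modes fall back to linear-aligned. */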
static inline unsigned evergreen_array_mode(unsigned mode)
{
	switch (mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:	return V_028C70_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_1D:		return V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_2D:		return V_028C70_ARRAY_2D_TILED_THIN1;
	}
}

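/* Encode a bank count of 2/4/8/16 as log2(nbanks) - 1, the encoding the
 * NUM_BANKS register fields expect; unexpected counts fall back to the
 * 8-bank encoding. */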
static uint32_t eg_num_banks(uint32_t nbanks)
{
	switch (nbanks) {
	case 2:
		return 0;
	case 4:
		return 1;
	case 8:
	default:
		return 2;
	case 16:
		return 3;
	}
}


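/* Encode a tile split of 64..4096 bytes as log2(tile_split / 64) for the
 * TILE_SPLIT register fields; out-of-range values are treated as 1024.
 * The two helpers below do the same log2 encoding for the macro tile
 * aspect ratio and the bank width/height. */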
static unsigned eg_tile_split(unsigned tile_split)
{
	switch (tile_split) {
	case 64:	tile_split = 0;	break;
	case 128:	tile_split = 1;	break;
	case 256:	tile_split = 2;	break;
	case 512:	tile_split = 3;	break;
	default:
	case 1024:	tile_split = 4;	break;
	case 2048:	tile_split = 5;	break;
	case 4096:	tile_split = 6;	break;
	}
	return tile_split;
}

static unsigned eg_macro_tile_aspect(unsigned macro_tile_aspect)
{
	switch (macro_tile_aspect) {
	default:
	case 1:	macro_tile_aspect = 0;	break;
	case 2:	macro_tile_aspect = 1;	break;
	case 4:	macro_tile_aspect = 2;	break;
	case 8:	macro_tile_aspect = 3;	break;
	}
	return macro_tile_aspect;
}

static unsigned eg_bank_wh(unsigned bankwh)
{
	switch (bankwh) {
	default:
	case 1:	bankwh = 0;	break;
	case 2:	bankwh = 1;	break;
	case 4:	bankwh = 2;	break;
	case 8:	bankwh = 3;	break;
	}
	return bankwh;
}

static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}

static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028780_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028780_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028780_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}

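/* Derive the SQ_TEX_DIM field from the resource and view targets: cube
 * views force the cubemap dimension, while a cubemap sampled through a
 * non-cube view is treated as a 2D array. */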
static unsigned r600_tex_dim(struct r600_texture *rtex,
			     unsigned view_target, unsigned nr_samples)
{
	unsigned res_target = rtex->resource.b.b.target;

	if (view_target == PIPE_TEXTURE_CUBE ||
	    view_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = view_target;
	/* If interpreting cubemaps as something else, set 2D_ARRAY. */
	else if (res_target == PIPE_TEXTURE_CUBE ||
		 res_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = PIPE_TEXTURE_2D_ARRAY;

	switch (res_target) {
	default:
	case PIPE_TEXTURE_1D:
		return V_030000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_030000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_MSAA :
					V_030000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_030000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_030000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_030000_SQ_TEX_DIM_CUBEMAP;
	}
}

static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028040_Z_16;
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
	case PIPE_FORMAT_X8Z24_UNORM:
	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
		return V_028040_Z_24;
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028040_Z_32_FLOAT;
	default:
		return ~0U;
	}
}

static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
					FALSE) != ~0U;
}

static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
	       r600_translate_colorswap(format, FALSE) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}

bool evergreen_is_format_supported(struct pipe_screen *screen,
				   enum pipe_format format,
				   enum pipe_texture_target target,
				   unsigned sample_count,
				   unsigned storage_sample_count,
				   unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return false;
	}

	if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
		return false;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return false;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return false;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if (usage & PIPE_BIND_INDEX_BUFFER &&
	    r600_is_index_format_supported(format)) {
		retval |= PIPE_BIND_INDEX_BUFFER;
	}

	if ((usage & PIPE_BIND_LINEAR) &&
	    !util_format_is_compressed(format) &&
	    !(usage & PIPE_BIND_DEPTH_STENCIL))
		retval |= PIPE_BIND_LINEAR;

	return retval == usage;
}

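/* Build the CSO for pipe_blend_state. Two command buffers are filled: one
 * with the translated per-RT blend controls and one ("no_blend") with all
 * CB_BLENDi_CONTROL registers zeroed, presumably so blending can be
 * disabled later without re-translating the state. */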
static void *evergreen_create_blend_state_mode(struct pipe_context *ctx,
					       const struct pipe_blend_state *state, int mode)
{
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* We pretend 8 buffers are used; CB_SHADER_MASK will disable the unused ones. */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	/* only have dual source on MRT0 */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->alpha_to_one = state->alpha_to_one;

	if (target_mask)
		color_control |= S_028808_MODE(mode);
	else
		color_control |= S_028808_MODE(V_028808_CB_DISABLE);


	r600_store_context_reg(&blend->buffer, R_028808_CB_COLOR_CONTROL, color_control);
	r600_store_context_reg(&blend->buffer, R_028B70_DB_ALPHA_TO_MASK,
			       S_028B70_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET3(2));
	r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);

	/* Copy over the dwords set so far into buffer_no_blend.
	 * Only the CB_BLENDi_CONTROL registers must be set after this. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;

	for (int i = 0; i < 8; i++) {
		/* state->rt entries > 0 only written if independent blending */
		const int j = state->independent_blend_enable ? i : 0;

		unsigned eqRGB = state->rt[j].rgb_func;
		unsigned srcRGB = state->rt[j].rgb_src_factor;
		unsigned dstRGB = state->rt[j].rgb_dst_factor;
		unsigned eqA = state->rt[j].alpha_func;
		unsigned srcA = state->rt[j].alpha_src_factor;
		unsigned dstA = state->rt[j].alpha_dst_factor;
		uint32_t bc = 0;

		r600_store_value(&blend->buffer_no_blend, 0);

		if (!state->rt[j].blend_enable) {
			r600_store_value(&blend->buffer, 0);
			continue;
		}

		bc |= S_028780_BLEND_CONTROL_ENABLE(1);
		bc |= S_028780_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
		bc |= S_028780_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
		bc |= S_028780_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			bc |= S_028780_SEPARATE_ALPHA_BLEND(1);
			bc |= S_028780_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
			bc |= S_028780_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
			bc |= S_028780_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
		}
		r600_store_value(&blend->buffer, bc);
	}
	return blend;
}

static void *evergreen_create_blend_state(struct pipe_context *ctx,
					  const struct pipe_blend_state *state)
{

	return evergreen_create_blend_state_mode(ctx, state, V_028808_CB_NORMAL);
}

static void *evergreen_create_dsa_state(struct pipe_context *ctx,
					const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (!dsa) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth_writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth_enabled) |
		S_028800_Z_WRITE_ENABLE(state->depth_writemask) |
		S_028800_ZFUNC(state->depth_func);

	/* stencil */
	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	/* alpha */
	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha_enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha_func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha_ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	/* misc */
	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}

static void *evergreen_create_rs_state(struct pipe_context *ctx,
				       const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (!rs) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->scissor_enable = state->scissor;
	rs->clip_halfz = state->clip_halfz;
	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->rasterizer_discard = state->rasterizer_discard;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip_near) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip_far) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1) |
		S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	rs->multisample_enable = state->multisample;

	/* offset */
	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 16.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
	rs->offset_units_unscaled = state->offset_units_unscaled;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
		      S_0286D4_PNT_SPRITE_OVRD_X(2) |
		      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
		      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
		      S_0286D4_PNT_SPRITE_OVRD_W(1);
	if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
		spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH((unsigned)(state->line_width * 8)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A48_PA_SC_MODE_CNTL_0,
			       S_028A48_MSAA_ENABLE(state->multisample) |
			       S_028A48_VPORT_SCISSOR_ENABLE(1) |
			       S_028A48_LINE_STIPPLE_ENABLE(state->line_stipple_enable));

	if (rctx->b.chip_class == CAYMAN) {
		r600_store_context_reg(&rs->buffer, CM_R_028BE4_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	} else {
		r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	}

	r600_store_context_reg(&rs->buffer, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
	r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
			       S_028814_CULL_FRONT((state->cull_face & PIPE_FACE_FRONT) ? 1 : 0) |
			       S_028814_CULL_BACK((state->cull_face & PIPE_FACE_BACK) ? 1 : 0) |
			       S_028814_FACE(!state->front_ccw) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
			       S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						  state->fill_back != PIPE_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
			       S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back)));
	return rs;
}

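/* Translate pipe_sampler_state into the three SQ_TEX_SAMPLER_WORD dwords.
 * TRUNCATE_COORD is only set when both min and mag filters are nearest
 * (presumably to match point-sampling coordinate snapping), and the border
 * color is stashed in the CSO only when the wrap modes can sample it. */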
static void *evergreen_create_sampler_state(struct pipe_context *ctx,
					    const struct pipe_sampler_state *state)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
						       : state->max_anisotropy;
	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
	bool trunc_coord = state->min_img_filter == PIPE_TEX_FILTER_NEAREST &&
			   state->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
	float max_lod = state->max_lod;

	if (!ss) {
		return NULL;
	}

	/* If the min_mip_filter is NONE, then the texture has no mipmapping and
	 * MIP_FILTER will also be set to NONE. However, if more than one LOD is
	 * configured, then the texture lookup seems to fail for some specific texture
	 * formats. Forcing the number of LODs to one in this case fixes it. */
	if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
		max_lod = state->min_lod;

	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(eg_tex_filter(state->mag_img_filter, max_aniso)) |
		S_03C000_XY_MIN_FILTER(eg_tex_filter(state->min_img_filter, max_aniso)) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(max_lod, 0, 15), 8));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] =
		S_03C008_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 8)) |
		(state->seamless_cube_map ? 0 : S_03C008_DISABLE_CUBE_WRAP(1)) |
		S_03C008_TRUNCATE_COORD(trunc_coord) |
		S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}

struct eg_buf_res_params {
	enum pipe_format pipe_format;
	unsigned offset;
	unsigned size;
	unsigned char swizzle[4];
	bool uncached;
	bool force_swizzle;
	bool size_in_bytes;
};

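/* Fill the 8-dword texture resource descriptor for a buffer view
 * (TYPE = VALID_BUFFER): dwords 0-1 hold the base address and size,
 * dword 2 the stride/format, dword 3 the swizzle, and dword 4 the
 * element count consumed by resinfo/TXQ. */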
static void evergreen_fill_buffer_resource_words(struct r600_context *rctx,
						 struct pipe_resource *buffer,
						 struct eg_buf_res_params *params,
						 bool *skip_mip_address_reloc,
						 unsigned tex_resource_words[8])
{
	struct r600_texture *tmp = (struct r600_texture*)buffer;
	uint64_t va;
	int stride = util_format_get_blocksize(params->pipe_format);
	unsigned format, num_format, format_comp, endian;
	unsigned swizzle_res;
	const struct util_format_description *desc;

	r600_vertex_data_type(params->pipe_format,
			      &format, &num_format, &format_comp,
			      &endian);

	desc = util_format_description(params->pipe_format);

	if (params->force_swizzle)
		swizzle_res = r600_get_swizzle_combined(params->swizzle, NULL, TRUE);
	else
		swizzle_res = r600_get_swizzle_combined(desc->swizzle, params->swizzle, TRUE);

	va = tmp->resource.gpu_address + params->offset;
	*skip_mip_address_reloc = true;
	tex_resource_words[0] = va;
	tex_resource_words[1] = params->size - 1;
	tex_resource_words[2] = S_030008_BASE_ADDRESS_HI(va >> 32UL) |
		S_030008_STRIDE(stride) |
		S_030008_DATA_FORMAT(format) |
		S_030008_NUM_FORMAT_ALL(num_format) |
		S_030008_FORMAT_COMP_ALL(format_comp) |
		S_030008_ENDIAN_SWAP(endian);
	tex_resource_words[3] = swizzle_res | S_03000C_UNCACHED(params->uncached);
	/*
	 * dword 4 holds the number of elements, for use with resinfo,
	 * although the AMD GPU shader analyser
	 * uses a const buffer to store the element sizes for buffer txq
	 */
	tex_resource_words[4] = params->size_in_bytes ? params->size : (params->size / stride);

	tex_resource_words[5] = tex_resource_words[6] = 0;
	tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
}

static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_context *rctx,
			    struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)
{
	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
	struct eg_buf_res_params params;

	memset(&params, 0, sizeof(params));

	params.pipe_format = view->base.format;
	params.offset = view->base.u.buf.offset;
	params.size = view->base.u.buf.size;
	params.swizzle[0] = view->base.swizzle_r;
	params.swizzle[1] = view->base.swizzle_g;
	params.swizzle[2] = view->base.swizzle_b;
	params.swizzle[3] = view->base.swizzle_a;

	evergreen_fill_buffer_resource_words(rctx, view->base.texture,
					     &params, &view->skip_mip_address_reloc,
					     view->tex_resource_words);
	view->tex_resource = &tmp->resource;

	if (tmp->resource.gpu_address)
		list_addtail(&view->list, &rctx->texture_buffers);
	return &view->base;
}

struct eg_tex_res_params {
	enum pipe_format pipe_format;
	int force_level;
	unsigned width0;
	unsigned height0;
	unsigned first_level;
	unsigned last_level;
	unsigned first_layer;
	unsigned last_layer;
	unsigned target;
	unsigned char swizzle[4];
};

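/* Fill the 8-dword texture resource descriptor for an image view.
 * Returns 0 on success, or -1 if the format cannot be translated.
 * For compressed MSAA surfaces the FMASK address goes into the
 * MIP_ADDRESS word instead of a mip chain. */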
static int evergreen_fill_tex_resource_words(struct r600_context *rctx,
					     struct pipe_resource *texture,
					     struct eg_tex_res_params *params,
					     bool *skip_mip_address_reloc,
					     unsigned tex_resource_words[8])
{
	struct r600_screen *rscreen = (struct r600_screen*)rctx->b.b.screen;
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char array_mode = 0, non_disp_tiling = 0;
	unsigned height, depth, width;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks, fmask_bankh;
	struct legacy_surf_level *surflevel;
	unsigned base_level, first_level, last_level;
	unsigned dim, last_layer;
	uint64_t va;
	bool do_endian_swap = FALSE;

	tile_split = tmp->surface.u.legacy.tile_split;
	surflevel = tmp->surface.u.legacy.level;

	/* Texturing with separate depth and stencil. */
	if (tmp->db_compatible) {
		switch (params->pipe_format) {
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_Z32_FLOAT;
			break;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			/* Z24 is always stored like this for DB
			 * compatibility.
			 */
			params->pipe_format = PIPE_FORMAT_Z24X8_UNORM;
			break;
		case PIPE_FORMAT_X24S8_UINT:
		case PIPE_FORMAT_S8X24_UINT:
		case PIPE_FORMAT_X32_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_S8_UINT;
			tile_split = tmp->surface.u.legacy.stencil_tile_split;
			surflevel = tmp->surface.u.legacy.zs.stencil_level;
			break;
		default:;
		}
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !tmp->db_compatible;

	format = r600_translate_texformat(rctx->b.b.screen, params->pipe_format,
					  params->swizzle,
					  &word4, &yuv_format, do_endian_swap);
	assert(format != ~0);
	if (format == ~0) {
		return -1;
	}

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	base_level = 0;
	first_level = params->first_level;
	last_level = params->last_level;
	width = params->width0;
	height = params->height0;
	depth = texture->depth0;

	if (params->force_level) {
		base_level = params->force_level;
		first_level = 0;
		last_level = 0;
		width = u_minify(width, params->force_level);
		height = u_minify(height, params->force_level);
		depth = u_minify(depth, params->force_level);
	}

	pitch = surflevel[base_level].nblk_x * util_format_get_blockwidth(params->pipe_format);
	non_disp_tiling = tmp->non_disp_tiling;

	switch (surflevel[base_level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_028C70_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}
	macro_aspect = tmp->surface.u.legacy.mtilea;
	bankw = tmp->surface.u.legacy.bankw;
	bankh = tmp->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(tmp->fmask.bank_height);

	/* 128 bit formats require tile type = 1 */
	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(params->pipe_format) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);


	va = tmp->resource.gpu_address;

	/* array type views and views into array types need to use layer offset */
	dim = r600_tex_dim(tmp, params->target, texture->nr_samples);

	if (dim == V_030000_SQ_TEX_DIM_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_2D_ARRAY ||
		   dim == V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA) {
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_CUBEMAP)
		depth = texture->array_size / 6;

	tex_resource_words[0] = (S_030000_DIM(dim) |
				 S_030000_PITCH((pitch / 8) - 1) |
				 S_030000_TEX_WIDTH(width - 1));
	if (rscreen->b.chip_class == CAYMAN)
		tex_resource_words[0] |= CM_S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	else
		tex_resource_words[0] |= S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	tex_resource_words[1] = (S_030004_TEX_HEIGHT(height - 1) |
				 S_030004_TEX_DEPTH(depth - 1) |
				 S_030004_ARRAY_MODE(array_mode));
	tex_resource_words[2] = ((uint64_t)surflevel[base_level].offset_256B * 256 + va) >> 8;

	*skip_mip_address_reloc = false;
	/* TEX_RESOURCE_WORD3.MIP_ADDRESS */
	if (texture->nr_samples > 1 && rscreen->has_compressed_msaa_texturing) {
		if (tmp->is_depth) {
			/* disable FMASK (0 = disabled) */
			tex_resource_words[3] = 0;
			*skip_mip_address_reloc = true;
		} else {
			/* FMASK should be in MIP_ADDRESS for multisample textures */
			tex_resource_words[3] = (tmp->fmask.offset + va) >> 8;
		}
	} else if (last_level && texture->nr_samples <= 1) {
		tex_resource_words[3] = ((uint64_t)surflevel[1].offset_256B * 256 + va) >> 8;
	} else {
		tex_resource_words[3] = ((uint64_t)surflevel[base_level].offset_256B * 256 + va) >> 8;
	}

	last_layer = params->last_layer;
	if (params->target != texture->target && depth == 1) {
		last_layer = params->first_layer;
	}
	tex_resource_words[4] = (word4 |
				 S_030010_ENDIAN_SWAP(endian));
	tex_resource_words[5] = S_030014_BASE_ARRAY(params->first_layer) |
				S_030014_LAST_ARRAY(last_layer);
	tex_resource_words[6] = S_030018_TILE_SPLIT(tile_split);

	if (texture->nr_samples > 1) {
		unsigned log_samples = util_logbase2(texture->nr_samples);
		if (rscreen->b.chip_class == CAYMAN) {
			tex_resource_words[4] |= S_030010_LOG2_NUM_FRAGMENTS(log_samples);
		}
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		tex_resource_words[5] |= S_030014_LAST_LEVEL(log_samples);
		tex_resource_words[6] |= S_030018_FMASK_BANK_HEIGHT(fmask_bankh);
	} else {
		bool no_mip = first_level == last_level;

		tex_resource_words[4] |= S_030010_BASE_LEVEL(first_level);
		tex_resource_words[5] |= S_030014_LAST_LEVEL(last_level);
		/* aniso max 16 samples */
		tex_resource_words[6] |= S_030018_MAX_ANISO_RATIO(no_mip ? 0 : 4);
	}

	tex_resource_words[7] = S_03001C_DATA_FORMAT(format) |
				S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_TEXTURE) |
				S_03001C_BANK_WIDTH(bankw) |
				S_03001C_BANK_HEIGHT(bankh) |
				S_03001C_MACRO_TILE_ASPECT(macro_aspect) |
				S_03001C_NUM_BANKS(nbanks) |
				S_03001C_DEPTH_SAMPLE_ORDER(tmp->db_compatible);
	return 0;
}

struct pipe_sampler_view *
evergreen_create_sampler_view_custom(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     const struct pipe_sampler_view *state,
				     unsigned width0, unsigned height0,
				     unsigned force_level)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	struct eg_tex_res_params params;
	int ret;

	if (!view)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (state->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(rctx, view, width0, height0);

	memset(&params, 0, sizeof(params));
	params.pipe_format = state->format;
	params.force_level = force_level;
	params.width0 = width0;
	params.height0 = height0;
	params.first_level = state->u.tex.first_level;
	params.last_level = state->u.tex.last_level;
	params.first_layer = state->u.tex.first_layer;
	params.last_layer = state->u.tex.last_layer;
	params.target = state->target;
	params.swizzle[0] = state->swizzle_r;
	params.swizzle[1] = state->swizzle_g;
	params.swizzle[2] = state->swizzle_b;
	params.swizzle[3] = state->swizzle_a;

	ret = evergreen_fill_tex_resource_words(rctx, texture, &params,
						&view->skip_mip_address_reloc,
						view->tex_resource_words);
	if (ret != 0) {
		FREE(view);
		return NULL;
	}

	if (state->format == PIPE_FORMAT_X24S8_UINT ||
	    state->format == PIPE_FORMAT_S8X24_UINT ||
	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
	    state->format == PIPE_FORMAT_S8_UINT)
		view->is_stencil_sampler = true;

	view->tex_resource = &tmp->resource;

	return &view->base;
}

static struct pipe_sampler_view *
evergreen_create_sampler_view(struct pipe_context *ctx,
			      struct pipe_resource *tex,
			      const struct pipe_sampler_view *state)
{
	return evergreen_create_sampler_view_custom(ctx, tex, state,
						    tex->width0, tex->height0, 0);
}

static void evergreen_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct r600_config_state *a = (struct r600_config_state*)atom;

	radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
	if (a->dyn_gpr_enabled) {
		radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		radeon_emit(cs, a->sq_gpr_resource_mgmt_1);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_2);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_3);
	}
	radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (a->dyn_gpr_enabled << 8));
	if (a->dyn_gpr_enabled) {
		radeon_set_context_reg(cs, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				       S_028838_PS_GPRS(0x1e) |
				       S_028838_VS_GPRS(0x1e) |
				       S_028838_GS_GPRS(0x1e) |
				       S_028838_ES_GPRS(0x1e) |
				       S_028838_HS_GPRS(0x1e) |
				       S_028838_LS_GPRS(0x1e)); /* workaround for hw issues with dyn gpr - must set all limits to 240 instead of 0, 0x1e == 240 / 8 */
	}
}

static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}

static void evergreen_set_polygon_stipple(struct pipe_context *ctx,
					  const struct pipe_poly_stipple *state)
{
}

static void evergreen_get_scissor_rect(struct r600_context *rctx,
				       unsigned tl_x, unsigned tl_y, unsigned br_x, unsigned br_y,
				       uint32_t *tl, uint32_t *br)
{
	struct pipe_scissor_state scissor = {tl_x, tl_y, br_x, br_y};

	evergreen_apply_scissor_bug_workaround(&rctx->b, &scissor);

	*tl = S_028240_TL_X(scissor.minx) | S_028240_TL_Y(scissor.miny);
	*br = S_028244_BR_X(scissor.maxx) | S_028244_BR_Y(scissor.maxy);
}

struct r600_tex_color_info {
	unsigned info;
	unsigned view;
	unsigned dim;
	unsigned pitch;
	unsigned slice;
	unsigned attrib;
	unsigned ntype;
	unsigned fmask;
	unsigned fmask_slice;
	uint64_t offset;
	boolean export_16bpc;
};

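/* Derive CB register values for rendering to a raw buffer (used for RATs):
 * the surface is always linear-aligned, the pitch is padded to the pipe
 * interleave, and blending is bypassed. */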
static void evergreen_set_color_surface_buffer(struct r600_context *rctx,
					       struct r600_resource *res,
					       enum pipe_format pformat,
					       unsigned first_element,
					       unsigned last_element,
					       struct r600_tex_color_info *color)
{
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	unsigned block_size = util_format_get_blocksize(res->b.b.format);
	unsigned pitch_alignment =
		MAX2(64, rctx->screen->b.info.pipe_interleave_bytes / block_size);
	unsigned pitch = align(res->b.b.width0, pitch_alignment);
	int i;
	unsigned width_elements;

	width_elements = last_element - first_element + 1;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, FALSE);
	swap = r600_translate_colorswap(pformat, FALSE);

	endian = r600_colorformat_endian_swap(format, FALSE);

	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	pitch = (pitch / 8) - 1;
	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);

	color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(0) |
		       S_028C70_BLEND_BYPASS(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);
	color->attrib = S_028C74_NON_DISP_TILING_ORDER(1);
	color->ntype = ntype;
	color->export_16bpc = false;
	color->dim = width_elements - 1;
	color->slice = 0; /* (width_elements / 64) - 1;*/
	color->view = 0;
	color->offset = (res->gpu_address + first_element) >> 8;

	color->fmask = color->offset;
	color->fmask_slice = 0;
}

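/* Compute the CB state shared by all texture color surfaces: offset,
 * pitch/slice, tiling attributes, number type and FMASK, and whether the
 * EXPORT_NORM (16 bits per channel export) optimization is usable. */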
static void evergreen_set_color_surface_common(struct r600_context *rctx,
					       struct r600_texture *rtex,
					       unsigned level,
					       unsigned first_layer,
					       unsigned last_layer,
					       enum pipe_format pformat,
					       struct r600_tex_color_info *color)
{
	struct r600_screen *rscreen = rctx->screen;
	unsigned pitch, slice;
	unsigned non_disp_tiling, macro_aspect, tile_split, bankh, bankw, fmask_bankh, nbanks;
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	bool blend_clamp = 0, blend_bypass = 0, do_endian_swap = FALSE;
	int i;

	color->offset = (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256;
	color->view = S_028C6C_SLICE_START(first_layer) |
		      S_028C6C_SLICE_MAX(last_layer);

	color->offset += rtex->resource.gpu_address;
	color->offset >>= 8;

	color->dim = 0;
	pitch = (rtex->surface.u.legacy.level[level].nblk_x) / 8 - 1;
	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}

	color->info = 0;
	switch (rtex->surface.u.legacy.level[level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
		non_disp_tiling = 1;
		break;
	case RADEON_SURF_MODE_1D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_1D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	case RADEON_SURF_MODE_2D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_2D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	if (rtex->fmask.size)
		fmask_bankh = rtex->fmask.bank_height;
	else
		fmask_bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(fmask_bankh);

	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(pformat) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	color->attrib = S_028C74_TILE_SPLIT(tile_split)|
			S_028C74_NUM_BANKS(nbanks) |
			S_028C74_BANK_WIDTH(bankw) |
			S_028C74_BANK_HEIGHT(bankh) |
			S_028C74_MACRO_TILE_ASPECT(macro_aspect) |
			S_028C74_NON_DISP_TILING_ORDER(non_disp_tiling) |
			S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);

	if (rctx->b.chip_class == CAYMAN) {
		color->attrib |= S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] ==
							    PIPE_SWIZZLE_1);

		if (rtex->resource.b.b.nr_samples > 1) {
			unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
			color->attrib |= S_028C74_NUM_SAMPLES(log_samples) |
					 S_028C74_NUM_FRAGMENTS(log_samples);
		}
	}

	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !rtex->db_compatible;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, do_endian_swap);
	assert(format != ~0);
	swap = r600_translate_colorswap(pformat, do_endian_swap);
	assert(swap != ~0);

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	/* blend clamp should be set for all NORM/SRGB types */
	if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM ||
	    ntype == V_028C70_NUMBER_SRGB)
		blend_clamp = 1;

	/* set blend bypass according to docs if SINT/UINT or
	 * 8/24 COLOR variants */
	if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
	    format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
	    format == V_028C70_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	color->ntype = ntype;
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(blend_clamp) |
		       S_028C70_BLEND_BYPASS(blend_bypass) |
		       S_028C70_SIMPLE_FLOAT(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);

	if (rtex->fmask.size) {
		color->info |= S_028C70_COMPRESSION(1);
	}

	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases.
	 * EXPORT_NORM can be enabled if:
	 * - 11-bit or smaller UNORM/SNORM/SRGB
	 * - 16-bit or smaller FLOAT
	 */
	color->export_16bpc = false;
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
	    ((desc->channel[i].size < 12 &&
	      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
	      ntype != V_028C70_NUMBER_UINT && ntype != V_028C70_NUMBER_SINT) ||
	     (desc->channel[i].size < 17 &&
	      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
		color->info |= S_028C70_SOURCE_FORMAT(V_028C70_EXPORT_4C_16BPC);
		color->export_16bpc = true;
	}

	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);
	color->slice = S_028C68_SLICE_TILE_MAX(slice);

	if (rtex->fmask.size) {
		color->fmask = (rtex->resource.gpu_address + rtex->fmask.offset) >> 8;
		color->fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
	} else {
		color->fmask = color->offset;
		color->fmask_slice = S_028C88_TILE_MAX(slice);
	}
}

/**
 * This function initializes the CB* register values for RATs. It is meant
 * to be used for 1D aligned buffers that do not have an associated
 * radeon_surf.
 */
void evergreen_init_color_surface_rat(struct r600_context *rctx,
				      struct r600_surface *surf)
{
	struct pipe_resource *pipe_buffer = surf->base.texture;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_buffer(rctx, (struct r600_resource *)surf->base.texture,
					   surf->base.format, 0, pipe_buffer->width0,
					   &color);

	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info | S_028C70_RAT(1);
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->cb_color_view = 0;

	/* Set the buffer range the GPU will have access to: */
	util_range_add(pipe_buffer, &r600_resource(pipe_buffer)->valid_buffer_range,
		       0, pipe_buffer->width0);
}


void evergreen_init_color_surface(struct r600_context *rctx,
				  struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_common(rctx, rtex, level,
					   surf->base.u.tex.first_layer,
					   surf->base.u.tex.last_layer,
					   surf->base.format,
					   &color);

	surf->alphatest_bypass = color.ntype == V_028C70_NUMBER_UINT ||
				 color.ntype == V_028C70_NUMBER_SINT;
	surf->export_16bpc = color.export_16bpc;

	/* XXX handle enabling of CB beyond BASE8 which has different offset */
	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info;
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->color_initialized = true;
}

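/* Fill the DB register values for a depth/stencil surface: tiled layout,
 * per-level base addresses for Z and stencil, and the HTILE setup when
 * HTILE is enabled for this level. */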
static void evergreen_init_depth_surface(struct r600_context *rctx,
					 struct r600_surface *surf)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
	uint64_t offset;
	unsigned format, array_mode;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks;


	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	offset = rtex->resource.gpu_address;
	offset += (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256;

	switch (rtex->surface.u.legacy.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	default:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	offset >>= 8;

	surf->db_z_info = S_028040_ARRAY_MODE(array_mode) |
			  S_028040_FORMAT(format) |
			  S_028040_TILE_SPLIT(tile_split)|
			  S_028040_NUM_BANKS(nbanks) |
			  S_028040_BANK_WIDTH(bankw) |
			  S_028040_BANK_HEIGHT(bankh) |
			  S_028040_MACRO_TILE_ASPECT(macro_aspect);
	if (rscreen->b.chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) {
		surf->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
	}

	assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);

	surf->db_depth_base = offset;
	surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028058_PITCH_TILE_MAX(levelinfo->nblk_x / 8 - 1) |
			      S_028058_HEIGHT_TILE_MAX(levelinfo->nblk_y / 8 - 1);
	surf->db_depth_slice = S_02805C_SLICE_TILE_MAX(levelinfo->nblk_x *
						       levelinfo->nblk_y / 64 - 1);

	if (rtex->surface.has_stencil) {
		uint64_t stencil_offset;
		unsigned stile_split = rtex->surface.u.legacy.stencil_tile_split;

		stile_split = eg_tile_split(stile_split);

		stencil_offset = (uint64_t)rtex->surface.u.legacy.zs.stencil_level[level].offset_256B * 256;
		stencil_offset += rtex->resource.gpu_address;

		surf->db_stencil_base = stencil_offset >> 8;
		surf->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8) |
					S_028044_TILE_SPLIT(stile_split);
	} else {
		surf->db_stencil_base = offset;
		/* DRM 2.6.18 allows the INVALID format to disable stencil.
		 * Older kernels are out of luck. */
		surf->db_stencil_info = rctx->screen->b.info.drm_minor >= 18 ?
					S_028044_FORMAT(V_028044_STENCIL_INVALID) :
					S_028044_FORMAT(V_028044_STENCIL_8);
	}

	if (r600_htile_enabled(rtex, level)) {
		uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
		surf->db_htile_data_base = va >> 8;
		surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
					 S_028ABC_HTILE_HEIGHT(1) |
					 S_028ABC_FULL_CACHE(1);
		surf->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
		surf->db_preload_control = 0;
	}

	surf->depth_initialized = true;
}

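/* Bind a new framebuffer: flush and invalidate caches, lazily initialize
 * the bound color and depth surfaces, update the dependent atoms (alpha
 * test, DB state, CB misc), and recompute the framebuffer atom's CS size. */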
evergreen_set_framebuffer_state(struct pipe_context * ctx,const struct pipe_framebuffer_state * state)1447 static void evergreen_set_framebuffer_state(struct pipe_context *ctx,
1448 const struct pipe_framebuffer_state *state)
1449 {
1450 struct r600_context *rctx = (struct r600_context *)ctx;
1451 struct r600_surface *surf;
1452 struct r600_texture *rtex;
1453 uint32_t i, log_samples;
1454 uint32_t target_mask = 0;
1455 /* Flush TC when changing the framebuffer state, because the only
1456 * client not using TC that can change textures is the framebuffer.
1457 * Other places don't typically have to flush TC.
1458 */
1459 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
1460 R600_CONTEXT_FLUSH_AND_INV |
1461 R600_CONTEXT_FLUSH_AND_INV_CB |
1462 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1463 R600_CONTEXT_FLUSH_AND_INV_DB |
1464 R600_CONTEXT_FLUSH_AND_INV_DB_META |
1465 R600_CONTEXT_INV_TEX_CACHE;
1466
1467 util_copy_framebuffer_state(&rctx->framebuffer.state, state);
1468
1469 /* Colorbuffers. */
1470 rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
1471 rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
1472 util_format_is_pure_integer(state->cbufs[0]->format);
1473 rctx->framebuffer.compressed_cb_mask = 0;
1474 rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
1475
1476 for (i = 0; i < state->nr_cbufs; i++) {
1477 surf = (struct r600_surface*)state->cbufs[i];
1478 if (!surf)
1479 continue;
1480
1481 target_mask |= (0xf << (i * 4));
1482
1483 rtex = (struct r600_texture*)surf->base.texture;
1484
1485 r600_context_add_resource_size(ctx, state->cbufs[i]->texture);
1486
1487 if (!surf->color_initialized) {
1488 evergreen_init_color_surface(rctx, surf);
1489 }
1490
1491 if (!surf->export_16bpc) {
1492 rctx->framebuffer.export_16bpc = false;
1493 }
1494
1495 if (rtex->fmask.size) {
1496 rctx->framebuffer.compressed_cb_mask |= 1 << i;
1497 }
1498 }
1499
1500 /* Update alpha-test state dependencies.
1501 * Alpha-test is done on the first colorbuffer only. */
1502 if (state->nr_cbufs) {
1503 bool alphatest_bypass = false;
1504 bool export_16bpc = true;
1505
1506 surf = (struct r600_surface*)state->cbufs[0];
1507 if (surf) {
1508 alphatest_bypass = surf->alphatest_bypass;
1509 export_16bpc = surf->export_16bpc;
1510 }
1511
1512 if (rctx->alphatest_state.bypass != alphatest_bypass) {
1513 rctx->alphatest_state.bypass = alphatest_bypass;
1514 r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1515 }
1516 if (rctx->alphatest_state.cb0_export_16bpc != export_16bpc) {
1517 rctx->alphatest_state.cb0_export_16bpc = export_16bpc;
1518 r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1519 }
1520 }
1521
1522 /* ZS buffer. */
1523 if (state->zsbuf) {
1524 surf = (struct r600_surface*)state->zsbuf;
1525
1526 r600_context_add_resource_size(ctx, state->zsbuf->texture);
1527
1528 if (!surf->depth_initialized) {
1529 evergreen_init_depth_surface(rctx, surf);
1530 }
1531
1532 if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
1533 rctx->poly_offset_state.zs_format = state->zsbuf->format;
1534 r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
1535 }
1536
1537 if (rctx->db_state.rsurf != surf) {
1538 rctx->db_state.rsurf = surf;
1539 r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1540 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1541 }
1542 } else if (rctx->db_state.rsurf) {
1543 rctx->db_state.rsurf = NULL;
1544 r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1545 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1546 }
1547
1548 if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs ||
1549 rctx->cb_misc_state.bound_cbufs_target_mask != target_mask) {
1550 rctx->cb_misc_state.bound_cbufs_target_mask = target_mask;
1551 rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
1552 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
1553 }
1554
1555 if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
1556 rctx->alphatest_state.bypass = false;
1557 r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1558 }
1559
1560 log_samples = util_logbase2(rctx->framebuffer.nr_samples);
1561 /* This is for Cayman to program SAMPLE_RATE, and for RV770 to fix a hw bug. */
1562 if ((rctx->b.chip_class == CAYMAN ||
1563 rctx->b.family == CHIP_RV770) &&
1564 rctx->db_misc_state.log_samples != log_samples) {
1565 rctx->db_misc_state.log_samples = log_samples;
1566 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1567 }
1568
1569
1570 /* Calculate the CS size. */
1571 rctx->framebuffer.atom.num_dw = 4; /* SCISSOR */
1572
1573 /* MSAA. */
1574 if (rctx->b.chip_class == EVERGREEN)
1575 rctx->framebuffer.atom.num_dw += 17; /* Evergreen */
1576 else
1577 rctx->framebuffer.atom.num_dw += 28; /* Cayman */
1578
1579 /* Colorbuffers. */
1580 rctx->framebuffer.atom.num_dw += state->nr_cbufs * 23;
1581 rctx->framebuffer.atom.num_dw += state->nr_cbufs * 2;
1582 rctx->framebuffer.atom.num_dw += (12 - state->nr_cbufs) * 3;
1583
1584 /* ZS buffer. */
1585 if (state->zsbuf) {
1586 rctx->framebuffer.atom.num_dw += 24;
1587 rctx->framebuffer.atom.num_dw += 2;
1588 } else if (rctx->screen->b.info.drm_minor >= 18) {
1589 rctx->framebuffer.atom.num_dw += 4;
1590 }
1591
1592 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
1593
1594 r600_set_sample_locations_constant_buffer(rctx);
1595 rctx->framebuffer.do_update_surf_dirtiness = true;
1596 }
1597
1598 static void evergreen_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
1599 {
1600 struct r600_context *rctx = (struct r600_context *)ctx;
1601
1602 if (rctx->ps_iter_samples == min_samples)
1603 return;
1604
1605 rctx->ps_iter_samples = min_samples;
1606 if (rctx->framebuffer.nr_samples > 1) {
1607 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
1608 }
1609 }
1610
1611 /* 8x MSAA sample locations: each dword packs four signed 4-bit (x, y)
 * offsets in 1/16-pixel units, and the two-dword pattern repeats once per
 * pixel of the 2x2 quad. */
1612 static const uint32_t sample_locs_8x[] = {
1613 FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
1614 FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
1615 FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
1616 FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
1617 FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
1618 FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
1619 FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
1620 FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
1621 };
1622 static unsigned max_dist_8x = 7;
1623
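/* Return a sample position in the [0, 1) range expected by the state
 * tracker. Each location is stored as a signed 4-bit (x, y) offset in
 * 1/16-pixel units, four pairs per dword; the 4-bit bitfield below
 * sign-extends a nibble, and (idx + 8) / 16 remaps [-8, 7] to [0, 15/16].
 * For example, a stored offset of -4 yields (-4 + 8) / 16 = 0.25. */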
1624 static void evergreen_get_sample_position(struct pipe_context *ctx,
1625 unsigned sample_count,
1626 unsigned sample_index,
1627 float *out_value)
1628 {
1629 int offset, index;
1630 struct {
1631 		int idx:4; /* signed bitfield, used to sign-extend each packed 4-bit offset */
1632 } val;
1633 switch (sample_count) {
1634 case 1:
1635 default:
1636 out_value[0] = out_value[1] = 0.5;
1637 break;
1638 case 2:
1639 offset = 4 * (sample_index * 2);
1640 val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
1641 out_value[0] = (float)(val.idx + 8) / 16.0f;
1642 val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
1643 out_value[1] = (float)(val.idx + 8) / 16.0f;
1644 break;
1645 case 4:
1646 offset = 4 * (sample_index * 2);
1647 val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
1648 out_value[0] = (float)(val.idx + 8) / 16.0f;
1649 val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
1650 out_value[1] = (float)(val.idx + 8) / 16.0f;
1651 break;
1652 case 8:
1653 offset = 4 * (sample_index % 4 * 2);
1654 index = (sample_index / 4);
1655 val.idx = (sample_locs_8x[index] >> offset) & 0xf;
1656 out_value[0] = (float)(val.idx + 8) / 16.0f;
1657 val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
1658 out_value[1] = (float)(val.idx + 8) / 16.0f;
1659 break;
1660 }
1661 }
1662
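/* Program the PA_SC sample locations and AA config for the given sample
 * count. Counts other than 2/4/8 are treated as single-sample: the
 * location registers are left untouched and AA is disabled. */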
1663 static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
1664 {
1665
1666 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
1667 unsigned max_dist = 0;
1668
1669 switch (nr_samples) {
1670 default:
1671 nr_samples = 0;
1672 break;
1673 case 2:
1674 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_2x));
1675 radeon_emit_array(cs, eg_sample_locs_2x, ARRAY_SIZE(eg_sample_locs_2x));
1676 max_dist = eg_max_dist_2x;
1677 break;
1678 case 4:
1679 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_4x));
1680 radeon_emit_array(cs, eg_sample_locs_4x, ARRAY_SIZE(eg_sample_locs_4x));
1681 max_dist = eg_max_dist_4x;
1682 break;
1683 case 8:
1684 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(sample_locs_8x));
1685 radeon_emit_array(cs, sample_locs_8x, ARRAY_SIZE(sample_locs_8x));
1686 max_dist = max_dist_8x;
1687 break;
1688 }
1689
1690 if (nr_samples > 1) {
1691 radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1692 radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
1693 S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
1694 radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
1695 S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
1696 radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
1697 EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1) |
1698 EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
1699 EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
1700 } else {
1701 radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1702 radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
1703 radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
1704 radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
1705 EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
1706 EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
1707 }
1708 }
1709
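/* Emit shader image (RAT) state. Each image is programmed twice: once as
 * a CB_COLOR* target for RAT access, and once as a pair of SET_RESOURCE
 * fetch constants (the immediate buffer plus the real resource), with NOP
 * relocations after each packet so the kernel can patch GPU addresses.
 * pkt_flags selects compute-mode packets for the compute variants. */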
1710 static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_atom *atom,
1711 int immed_id_base, int res_id_base, int offset, uint32_t pkt_flags)
1712 {
1713 struct r600_image_state *state = (struct r600_image_state *)atom;
1714 struct pipe_framebuffer_state *fb_state = &rctx->framebuffer.state;
1715 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
1716 struct r600_texture *rtex;
1717 struct r600_resource *resource;
1718 int i;
1719
1720 for (i = 0; i < R600_MAX_IMAGES; i++) {
1721 struct r600_image_view *image = &state->views[i];
1722 unsigned reloc, immed_reloc;
1723 int idx = i + offset;
1724
1725 if (!pkt_flags)
1726 idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
1727 if (!image->base.resource)
1728 continue;
1729
1730 resource = (struct r600_resource *)image->base.resource;
1731 if (resource->b.b.target != PIPE_BUFFER)
1732 rtex = (struct r600_texture *)image->base.resource;
1733 else
1734 rtex = NULL;
1735
1736 reloc = radeon_add_to_buffer_list(&rctx->b,
1737 &rctx->b.gfx,
1738 resource,
1739 RADEON_USAGE_READWRITE,
1740 RADEON_PRIO_SHADER_RW_BUFFER);
1741
1742 immed_reloc = radeon_add_to_buffer_list(&rctx->b,
1743 &rctx->b.gfx,
1744 resource->immed_buffer,
1745 RADEON_USAGE_READWRITE,
1746 RADEON_PRIO_SHADER_RW_BUFFER);
1747
1748 if (pkt_flags)
1749 radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
1750 else
1751 radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
1752
1753 radeon_emit(cs, image->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
1754 radeon_emit(cs, image->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
1755 radeon_emit(cs, image->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
1756 radeon_emit(cs, image->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
1757 radeon_emit(cs, image->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
1758 radeon_emit(cs, image->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
1759 radeon_emit(cs, image->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
1760 radeon_emit(cs, rtex ? rtex->cmask.base_address_reg : image->cb_color_base); /* R_028C7C_CB_COLOR0_CMASK */
1761 radeon_emit(cs, rtex ? rtex->cmask.slice_tile_max : 0); /* R_028C80_CB_COLOR0_CMASK_SLICE */
1762 radeon_emit(cs, image->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */
1763 radeon_emit(cs, image->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
1764 radeon_emit(cs, rtex ? rtex->color_clear_value[0] : 0); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
1765 radeon_emit(cs, rtex ? rtex->color_clear_value[1] : 0); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */
1766
1767 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
1768 radeon_emit(cs, reloc);
1769
1770 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
1771 radeon_emit(cs, reloc);
1772
1773 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
1774 radeon_emit(cs, reloc);
1775
1776 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
1777 radeon_emit(cs, reloc);
1778
1779 if (pkt_flags)
1780 radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
1781 else
1782 radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
1783
1784 		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028B9C_CB_IMMED0_BASE */
1785 radeon_emit(cs, immed_reloc);
1786
1787 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
1788 radeon_emit(cs, (immed_id_base + i + offset) * 8);
1789 radeon_emit_array(cs, image->immed_resource_words, 8);
1790
1791 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1792 radeon_emit(cs, immed_reloc);
1793
1794 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
1795 radeon_emit(cs, (res_id_base + i + offset) * 8);
1796 radeon_emit_array(cs, image->resource_words, 8);
1797
1798 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1799 radeon_emit(cs, reloc);
1800
1801 if (!image->skip_mip_address_reloc) {
1802 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1803 radeon_emit(cs, reloc);
1804 }
1805 }
1806 }
1807
1808 static void evergreen_emit_fragment_image_state(struct r600_context *rctx, struct r600_atom *atom)
1809 {
1810 evergreen_emit_image_state(rctx, atom,
1811 R600_IMAGE_IMMED_RESOURCE_OFFSET,
1812 R600_IMAGE_REAL_RESOURCE_OFFSET, 0, 0);
1813 }
1814
1815 static void evergreen_emit_compute_image_state(struct r600_context *rctx, struct r600_atom *atom)
1816 {
1817 evergreen_emit_image_state(rctx, atom,
1818 EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
1819 EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
1820 0, RADEON_CP_PACKET3_COMPUTE_MODE);
1821 }
1822
1823 static void evergreen_emit_fragment_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
1824 {
1825 int offset = util_bitcount(rctx->fragment_images.enabled_mask);
1826 evergreen_emit_image_state(rctx, atom,
1827 R600_IMAGE_IMMED_RESOURCE_OFFSET,
1828 R600_IMAGE_REAL_RESOURCE_OFFSET, offset, 0);
1829 }
1830
1831 static void evergreen_emit_compute_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
1832 {
1833 int offset = util_bitcount(rctx->compute_images.enabled_mask);
1834 evergreen_emit_image_state(rctx, atom,
1835 EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
1836 EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
1837 offset, RADEON_CP_PACKET3_COMPUTE_MODE);
1838 }
1839
1840 static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
1841 {
1842 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
1843 struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
1844 unsigned nr_cbufs = state->nr_cbufs;
1845 unsigned i, tl, br;
1846 struct r600_texture *tex = NULL;
1847 struct r600_surface *cb = NULL;
1848
1849 /* XXX support more colorbuffers once we need them */
1850 assert(nr_cbufs <= 8);
1851 if (nr_cbufs > 8)
1852 nr_cbufs = 8;
1853
1854 /* Colorbuffers. */
1855 for (i = 0; i < nr_cbufs; i++) {
1856 unsigned reloc, cmask_reloc;
1857
1858 cb = (struct r600_surface*)state->cbufs[i];
1859 if (!cb) {
1860 radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1861 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1862 continue;
1863 }
1864
1865 tex = (struct r600_texture *)cb->base.texture;
1866 reloc = radeon_add_to_buffer_list(&rctx->b,
1867 &rctx->b.gfx,
1868 (struct r600_resource*)cb->base.texture,
1869 RADEON_USAGE_READWRITE,
1870 tex->resource.b.b.nr_samples > 1 ?
1871 RADEON_PRIO_COLOR_BUFFER_MSAA :
1872 RADEON_PRIO_COLOR_BUFFER);
1873
1874 if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
1875 cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
1876 tex->cmask_buffer, RADEON_USAGE_READWRITE,
1877 RADEON_PRIO_SEPARATE_META);
1878 } else {
1879 cmask_reloc = reloc;
1880 }
1881
1882 radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13);
1883 radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
1884 radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
1885 radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
1886 radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
1887 radeon_emit(cs, cb->cb_color_info | tex->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
1888 radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
1889 radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
1890 radeon_emit(cs, tex->cmask.base_address_reg); /* R_028C7C_CB_COLOR0_CMASK */
1891 radeon_emit(cs, tex->cmask.slice_tile_max); /* R_028C80_CB_COLOR0_CMASK_SLICE */
1892 radeon_emit(cs, cb->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */
1893 radeon_emit(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
1894 radeon_emit(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
1895 radeon_emit(cs, tex->color_clear_value[1]); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */
1896
1897 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
1898 radeon_emit(cs, reloc);
1899
1900 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
1901 radeon_emit(cs, reloc);
1902
1903 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
1904 radeon_emit(cs, cmask_reloc);
1905
1906 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
1907 radeon_emit(cs, reloc);
1908 }
1909 /* set CB_COLOR1_INFO for possible dual-src blending */
1910 if (rctx->framebuffer.dual_src_blend && i == 1 && state->cbufs[0]) {
1911 radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
1912 cb->cb_color_info | tex->cb_color_info);
1913 i++;
1914 }
1915 i += util_bitcount(rctx->fragment_images.enabled_mask);
1916 i += util_bitcount(rctx->fragment_buffers.enabled_mask);
1917 for (; i < 8 ; i++)
1918 radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
1919 for (; i < 12; i++)
1920 radeon_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0);
1921
1922 /* ZS buffer. */
1923 if (state->zsbuf) {
1924 struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
1925 unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
1926 &rctx->b.gfx,
1927 (struct r600_resource*)state->zsbuf->texture,
1928 RADEON_USAGE_READWRITE,
1929 zb->base.texture->nr_samples > 1 ?
1930 RADEON_PRIO_DEPTH_BUFFER_MSAA :
1931 RADEON_PRIO_DEPTH_BUFFER);
1932
1933 radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
1934
1935 radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 8);
1936 radeon_emit(cs, zb->db_z_info); /* R_028040_DB_Z_INFO */
1937 radeon_emit(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */
1938 radeon_emit(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */
1939 radeon_emit(cs, zb->db_stencil_base); /* R_02804C_DB_STENCIL_READ_BASE */
1940 radeon_emit(cs, zb->db_depth_base); /* R_028050_DB_Z_WRITE_BASE */
1941 radeon_emit(cs, zb->db_stencil_base); /* R_028054_DB_STENCIL_WRITE_BASE */
1942 radeon_emit(cs, zb->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
1943 radeon_emit(cs, zb->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
1944
1945 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028048_DB_Z_READ_BASE */
1946 radeon_emit(cs, reloc);
1947
1948 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */
1949 radeon_emit(cs, reloc);
1950
1951 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */
1952 radeon_emit(cs, reloc);
1953
1954 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */
1955 radeon_emit(cs, reloc);
1956 } else if (rctx->screen->b.info.drm_minor >= 18) {
1957 /* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
1958 * Older kernels are out of luck. */
1959 radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
1960 radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
1961 radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
1962 }
1963
1964 /* Framebuffer dimensions. */
1965 evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br);
1966
1967 radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
1968 radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
1969 radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
1970
1971 if (rctx->b.chip_class == EVERGREEN) {
1972 evergreen_emit_msaa_state(rctx, rctx->framebuffer.nr_samples, rctx->ps_iter_samples);
1973 } else {
1974 cayman_emit_msaa_state(cs, rctx->framebuffer.nr_samples,
1975 rctx->ps_iter_samples, 0);
1976 }
1977 }
1978
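/* Translate pipe polygon-offset state into the PA_SU_POLY_OFFSET_*
 * registers. Unless unscaled units are requested, the units are adjusted
 * for the depth format (x2 for 24-bit UNORM, x4 for 16-bit UNORM) and
 * NEG_NUM_DB_BITS / DB_IS_FLOAT_FMT are set to match. */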
1979 static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
1980 {
1981 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
1982 struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
1983 float offset_units = state->offset_units;
1984 float offset_scale = state->offset_scale;
1985 uint32_t pa_su_poly_offset_db_fmt_cntl = 0;
1986
1987 if (!state->offset_units_unscaled) {
1988 switch (state->zs_format) {
1989 case PIPE_FORMAT_Z24X8_UNORM:
1990 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1991 case PIPE_FORMAT_X8Z24_UNORM:
1992 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1993 offset_units *= 2.0f;
1994 pa_su_poly_offset_db_fmt_cntl =
1995 S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
1996 break;
1997 case PIPE_FORMAT_Z16_UNORM:
1998 offset_units *= 4.0f;
1999 pa_su_poly_offset_db_fmt_cntl =
2000 S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
2001 break;
2002 default:
2003 pa_su_poly_offset_db_fmt_cntl =
2004 S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
2005 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
2006 }
2007 }
2008
2009 radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
2010 radeon_emit(cs, fui(offset_scale));
2011 radeon_emit(cs, fui(offset_units));
2012 radeon_emit(cs, fui(offset_scale));
2013 radeon_emit(cs, fui(offset_units));
2014
2015 radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
2016 pa_su_poly_offset_db_fmt_cntl);
2017 }
2018
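/* Build the CB_TARGET_MASK contribution for image and buffer RATs: one
 * 0xf write-enable nibble per enabled RAT, with buffer RATs packed after
 * the last enabled image RAT, the whole mask shifted past the bound
 * colorbuffers. */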
2019 uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
2020 unsigned nr_cbufs)
2021 {
2022 unsigned base_mask = 0;
2023 unsigned dirty_mask = a->image_rat_enabled_mask;
2024 while (dirty_mask) {
2025 unsigned idx = u_bit_scan(&dirty_mask);
2026 base_mask |= (0xf << (idx * 4));
2027 }
2028 unsigned offset = util_last_bit(a->image_rat_enabled_mask);
2029 dirty_mask = a->buffer_rat_enabled_mask;
2030 while (dirty_mask) {
2031 unsigned idx = u_bit_scan(&dirty_mask);
2032 base_mask |= (0xf << (idx + offset) * 4);
2033 }
2034 return base_mask << (nr_cbufs * 4);
2035 }
2036
2037 static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
2038 {
2039 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2040 struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
2041 unsigned fb_colormask = a->bound_cbufs_target_mask;
2042 unsigned ps_colormask = a->ps_color_export_mask;
2043 unsigned rat_colormask = evergreen_construct_rat_mask(rctx, a, a->nr_cbufs);
2044 radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
2045 radeon_emit(cs, (a->blend_colormask & fb_colormask) | rat_colormask); /* R_028238_CB_TARGET_MASK */
2046 /* This must match the used export instructions exactly.
2047 * Other values may lead to undefined behavior and hangs.
2048 */
2049 radeon_emit(cs, ps_colormask); /* R_02823C_CB_SHADER_MASK */
2050 }
2051
2052 static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
2053 {
2054 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2055 struct r600_db_state *a = (struct r600_db_state*)atom;
2056
2057 if (a->rsurf && a->rsurf->db_htile_surface) {
2058 struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
2059 unsigned reloc_idx;
2060
2061 radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
2062 radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
2063 radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
2064 radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
2065 reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
2066 RADEON_USAGE_READWRITE, RADEON_PRIO_SEPARATE_META);
2067 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2068 radeon_emit(cs, reloc_idx);
2069 } else {
2070 radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
2071 radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
2072 }
2073 }
2074
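/* Derive DB_RENDER_CONTROL, DB_COUNT_CONTROL and DB_RENDER_OVERRIDE from
 * the current depth/stencil usage: perfect zpass counts while occlusion
 * queries are active, copy/decompress controls for through-CB and
 * in-place flushes, and DEPTH_CLEAR_ENABLE for HTILE clears. */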
2075 static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
2076 {
2077 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2078 struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
2079 unsigned db_render_control = 0;
2080 unsigned db_count_control = 0;
2081 unsigned db_render_override =
2082 S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
2083 S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);
2084
2085 if (rctx->b.num_occlusion_queries > 0 &&
2086 !a->occlusion_queries_disabled) {
2087 db_count_control |= S_028004_PERFECT_ZPASS_COUNTS(1);
2088 if (rctx->b.chip_class == CAYMAN) {
2089 db_count_control |= S_028004_SAMPLE_RATE(a->log_samples);
2090 }
2091 db_render_override |= S_02800C_NOOP_CULL_DISABLE(1);
2092 } else {
2093 db_count_control |= S_028004_ZPASS_INCREMENT_DISABLE(1);
2094 }
2095
2096 	/* This is to fix a lockup: when hyperz and alpha test are enabled at
2097 	 * the same time, the GPU somehow gets confused about which order to
2098 	 * use for the Z test.
2099 	 */
2100 if (rctx->alphatest_state.sx_alpha_test_control)
2101 db_render_override |= S_02800C_FORCE_SHADER_Z_ORDER(1);
2102
2103 if (a->flush_depthstencil_through_cb) {
2104 assert(a->copy_depth || a->copy_stencil);
2105
2106 db_render_control |= S_028000_DEPTH_COPY_ENABLE(a->copy_depth) |
2107 S_028000_STENCIL_COPY_ENABLE(a->copy_stencil) |
2108 S_028000_COPY_CENTROID(1) |
2109 S_028000_COPY_SAMPLE(a->copy_sample);
2110 } else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
2111 db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
2112 S_028000_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
2113 db_render_override |= S_02800C_DISABLE_PIXEL_RATE_TILES(1);
2114 }
2115 if (a->htile_clear) {
2116 /* FIXME we might want to disable cliprect here */
2117 db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(1);
2118 }
2119
2120 radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
2121 radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */
2122 radeon_emit(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */
2123 radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
2124 radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
2125 }
2126
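/* Emit each dirty vertex buffer as an 8-dword vertex-fetch SET_RESOURCE
 * packet, followed by a NOP relocation so the kernel can patch the
 * buffer's GPU address. */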
2127 static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
2128 struct r600_vertexbuf_state *state,
2129 unsigned resource_offset,
2130 unsigned pkt_flags)
2131 {
2132 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2133 uint32_t dirty_mask = state->dirty_mask;
2134
2135 while (dirty_mask) {
2136 struct pipe_vertex_buffer *vb;
2137 struct r600_resource *rbuffer;
2138 uint64_t va;
2139 unsigned buffer_index = u_bit_scan(&dirty_mask);
2140
2141 vb = &state->vb[buffer_index];
2142 rbuffer = (struct r600_resource*)vb->buffer.resource;
2143 assert(rbuffer);
2144
2145 va = rbuffer->gpu_address + vb->buffer_offset;
2146
2147 /* fetch resources start at index 992 */
2148 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2149 radeon_emit(cs, (resource_offset + buffer_index) * 8);
2150 radeon_emit(cs, va); /* RESOURCEi_WORD0 */
2151 radeon_emit(cs, rbuffer->b.b.width0 - vb->buffer_offset - 1); /* RESOURCEi_WORD1 */
2152 radeon_emit(cs, /* RESOURCEi_WORD2 */
2153 S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
2154 S_030008_STRIDE(vb->stride) |
2155 S_030008_BASE_ADDRESS_HI(va >> 32UL));
2156 radeon_emit(cs, /* RESOURCEi_WORD3 */
2157 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
2158 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
2159 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
2160 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
2161 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
2162 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
2163 radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
2164 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */
2165
2166 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2167 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2168 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
2169 }
2170 state->dirty_mask = 0;
2171 }
2172
2173 static void evergreen_fs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
2174 {
2175 evergreen_emit_vertex_buffers(rctx, &rctx->vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_FS, 0);
2176 }
2177
2178 static void evergreen_cs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
2179 {
2180 evergreen_emit_vertex_buffers(rctx, &rctx->cs_vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_CS,
2181 RADEON_CP_PACKET3_COMPUTE_MODE);
2182 }
2183
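/* Emit dirty constant buffers. Buffers in the hardware kcache range are
 * programmed through ALU_CONST_BUFFER_SIZE/ALU_CONST_CACHE; every buffer
 * is additionally emitted as a fetch SET_RESOURCE so shaders can read it
 * through the fetch path as well. */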
2184 static void evergreen_emit_constant_buffers(struct r600_context *rctx,
2185 struct r600_constbuf_state *state,
2186 unsigned buffer_id_base,
2187 unsigned reg_alu_constbuf_size,
2188 unsigned reg_alu_const_cache,
2189 unsigned pkt_flags)
2190 {
2191 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2192 uint32_t dirty_mask = state->dirty_mask;
2193
2194 while (dirty_mask) {
2195 struct pipe_constant_buffer *cb;
2196 struct r600_resource *rbuffer;
2197 uint64_t va;
2198 unsigned buffer_index = ffs(dirty_mask) - 1;
2199 unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
2200
2201 cb = &state->cb[buffer_index];
2202 rbuffer = (struct r600_resource*)cb->buffer;
2203 assert(rbuffer);
2204
2205 va = rbuffer->gpu_address + cb->buffer_offset;
2206
2207 if (buffer_index < R600_MAX_HW_CONST_BUFFERS) {
2208 radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
2209 DIV_ROUND_UP(cb->buffer_size, 256), pkt_flags);
2210 radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
2211 pkt_flags);
2212 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2213 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2214 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
2215 }
2216
2217 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2218 radeon_emit(cs, (buffer_id_base + buffer_index) * 8);
2219 radeon_emit(cs, va); /* RESOURCEi_WORD0 */
2220 		radeon_emit(cs, cb->buffer_size - 1); /* RESOURCEi_WORD1 */
2221 radeon_emit(cs, /* RESOURCEi_WORD2 */
2222 S_030008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
2223 S_030008_STRIDE(gs_ring_buffer ? 4 : 16) |
2224 S_030008_BASE_ADDRESS_HI(va >> 32UL) |
2225 S_030008_DATA_FORMAT(FMT_32_32_32_32_FLOAT));
2226 radeon_emit(cs, /* RESOURCEi_WORD3 */
2227 S_03000C_UNCACHED(gs_ring_buffer ? 1 : 0) |
2228 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
2229 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
2230 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
2231 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
2232 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
2233 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
2234 radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
2235 radeon_emit(cs, /* RESOURCEi_WORD7 */
2236 S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER));
2237
2238 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2239 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2240 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
2241
2242 dirty_mask &= ~(1 << buffer_index);
2243 }
2244 state->dirty_mask = 0;
2245 }
2246
2247 /* VS constants can be in VS/ES (same space) or LS if tess is enabled */
2248 static void evergreen_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2249 {
2250 if (rctx->vs_shader->current->shader.vs_as_ls) {
2251 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
2252 EG_FETCH_CONSTANTS_OFFSET_LS,
2253 R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
2254 R_028F40_ALU_CONST_CACHE_LS_0,
2255 0 /* PKT3 flags */);
2256 } else {
2257 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
2258 EG_FETCH_CONSTANTS_OFFSET_VS,
2259 R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
2260 R_028980_ALU_CONST_CACHE_VS_0,
2261 0 /* PKT3 flags */);
2262 }
2263 }
2264
2265 static void evergreen_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2266 {
2267 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
2268 EG_FETCH_CONSTANTS_OFFSET_GS,
2269 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
2270 R_0289C0_ALU_CONST_CACHE_GS_0,
2271 0 /* PKT3 flags */);
2272 }
2273
2274 static void evergreen_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2275 {
2276 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
2277 EG_FETCH_CONSTANTS_OFFSET_PS,
2278 R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
2279 R_028940_ALU_CONST_CACHE_PS_0,
2280 0 /* PKT3 flags */);
2281 }
2282
2283 static void evergreen_emit_cs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2284 {
2285 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE],
2286 EG_FETCH_CONSTANTS_OFFSET_CS,
2287 R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
2288 R_028F40_ALU_CONST_CACHE_LS_0,
2289 RADEON_CP_PACKET3_COMPUTE_MODE);
2290 }
2291
2292 /* TES constants can be emitted to VS or ES, which share the same constant space */
2293 static void evergreen_emit_tes_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2294 {
2295 if (!rctx->tes_shader)
2296 return;
2297 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL],
2298 EG_FETCH_CONSTANTS_OFFSET_VS,
2299 R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
2300 R_028980_ALU_CONST_CACHE_VS_0,
2301 0);
2302 }
2303
2304 static void evergreen_emit_tcs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2305 {
2306 if (!rctx->tes_shader)
2307 return;
2308 evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL],
2309 EG_FETCH_CONSTANTS_OFFSET_HS,
2310 R_028F80_ALU_CONST_BUFFER_SIZE_HS_0,
2311 R_028F00_ALU_CONST_CACHE_HS_0,
2312 0);
2313 }
2314
2315 void evergreen_setup_scratch_buffers(struct r600_context *rctx) {
2316 static const struct {
2317 unsigned ring_base;
2318 unsigned item_size;
2319 unsigned ring_size;
2320 } regs[EG_NUM_HW_STAGES] = {
2321 [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_028914_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
2322 [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_028910_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
2323 [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_02890C_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
2324 [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_028908_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE },
2325 [EG_HW_STAGE_LS] = { R_008E10_SQ_LSTMP_RING_BASE, R_028830_SQ_LSTMP_RING_ITEMSIZE, R_008E14_SQ_LSTMP_RING_SIZE },
2326 [EG_HW_STAGE_HS] = { R_008E18_SQ_HSTMP_RING_BASE, R_028834_SQ_HSTMP_RING_ITEMSIZE, R_008E1C_SQ_HSTMP_RING_SIZE }
2327 };
2328
2329 for (unsigned i = 0; i < EG_NUM_HW_STAGES; i++) {
2330 struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
2331
2332 if (stage && unlikely(stage->scratch_space_needed)) {
2333 r600_setup_scratch_area_for_shader(rctx, stage,
2334 &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
2335 }
2336 }
2337 }
2338
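/* Emit dirty sampler views as 8-dword SET_RESOURCE packets. A second NOP
 * relocation is emitted for the mip chain unless the view marked it as
 * unnecessary via skip_mip_address_reloc. */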
2339 static void evergreen_emit_sampler_views(struct r600_context *rctx,
2340 struct r600_samplerview_state *state,
2341 unsigned resource_id_base, unsigned pkt_flags)
2342 {
2343 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2344 uint32_t dirty_mask = state->dirty_mask;
2345
2346 while (dirty_mask) {
2347 struct r600_pipe_sampler_view *rview;
2348 unsigned resource_index = u_bit_scan(&dirty_mask);
2349 unsigned reloc;
2350
2351 rview = state->views[resource_index];
2352 assert(rview);
2353
2354 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2355 radeon_emit(cs, (resource_id_base + resource_index) * 8);
2356 radeon_emit_array(cs, rview->tex_resource_words, 8);
2357
2358 reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
2359 RADEON_USAGE_READ,
2360 r600_get_sampler_view_priority(rview->tex_resource));
2361 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2362 radeon_emit(cs, reloc);
2363
2364 if (!rview->skip_mip_address_reloc) {
2365 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2366 radeon_emit(cs, reloc);
2367 }
2368 }
2369 state->dirty_mask = 0;
2370 }
2371
2372 static void evergreen_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2373 {
2374 if (rctx->vs_shader->current->shader.vs_as_ls) {
2375 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
2376 EG_FETCH_CONSTANTS_OFFSET_LS + R600_MAX_CONST_BUFFERS, 0);
2377 } else {
2378 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
2379 EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
2380 }
2381 }
2382
2383 static void evergreen_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2384 {
2385 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views,
2386 EG_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS, 0);
2387 }
2388
2389 static void evergreen_emit_tcs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2390 {
2391 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views,
2392 EG_FETCH_CONSTANTS_OFFSET_HS + R600_MAX_CONST_BUFFERS, 0);
2393 }
2394
2395 static void evergreen_emit_tes_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2396 {
2397 if (!rctx->tes_shader)
2398 return;
2399 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views,
2400 EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
2401 }
2402
2403 static void evergreen_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2404 {
2405 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views,
2406 EG_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS, 0);
2407 }
2408
2409 static void evergreen_emit_cs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2410 {
2411 evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views,
2412 EG_FETCH_CONSTANTS_OFFSET_CS + R600_MAX_CONST_BUFFERS, RADEON_CP_PACKET3_COMPUTE_MODE);
2413 }
2414
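/* The border-color registers take floats, so pure-integer border colors
 * are normalized using the bound format's channel width: unsigned
 * channels map to [0, 1], signed ones to [-1, 1]. Stencil-only formats
 * place stencil / 255 in the first channel. */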
2415 static void evergreen_convert_border_color(union pipe_color_union *in,
2416 union pipe_color_union *out,
2417 enum pipe_format format)
2418 {
2419 if (util_format_is_pure_integer(format) &&
2420 !util_format_is_depth_or_stencil(format)) {
2421 const struct util_format_description *d = util_format_description(format);
2422
2423 for (int i = 0; i < d->nr_channels; ++i) {
2424 int cs = d->channel[i].size;
2425 if (d->channel[i].type == UTIL_FORMAT_TYPE_SIGNED)
2426 			out->f[i] = (double)(in->i[i]) / ((1ul << (cs - 1)) - 1);
2427 else if (d->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
2428 			out->f[i] = (double)(in->ui[i]) / ((1ul << cs) - 1);
2429 else
2430 out->f[i] = 0;
2431 }
2432
2433 } else {
2434 switch (format) {
2435 case PIPE_FORMAT_X24S8_UINT:
2436 case PIPE_FORMAT_X32_S8X24_UINT:
2437 out->f[0] = (double)(in->ui[0]) / 255.0;
2438 out->f[1] = out->f[2] = out->f[3] = 0.0f;
2439 break;
2440 default:
2441 memcpy(out->f, in->f, 4 * sizeof(float));
2442 }
2443 }
2444 }
2445
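/* Emit dirty sampler states (3 dwords each). When a sampler uses a
 * border color, the color is converted against the bound view's format
 * and written through the TD_*_SAMPLER0_BORDER_* index/color registers. */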
2446 static void evergreen_emit_sampler_states(struct r600_context *rctx,
2447 struct r600_textures_info *texinfo,
2448 unsigned resource_id_base,
2449 unsigned border_index_reg,
2450 unsigned pkt_flags)
2451 {
2452 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2453 uint32_t dirty_mask = texinfo->states.dirty_mask;
2454 union pipe_color_union border_color = {{0,0,0,1}};
2455 union pipe_color_union *border_color_ptr = &border_color;
2456
2457 while (dirty_mask) {
2458 struct r600_pipe_sampler_state *rstate;
2459 unsigned i = u_bit_scan(&dirty_mask);
2460
2461 rstate = texinfo->states.states[i];
2462 assert(rstate);
2463
2464 if (rstate->border_color_use) {
2465 struct r600_pipe_sampler_view *rview = texinfo->views.views[i];
2466 if (rview) {
2467 evergreen_convert_border_color(&rstate->border_color,
2468 &border_color, rview->base.format);
2469 } else {
2470 border_color_ptr = &rstate->border_color;
2471 }
2472 }
2473
2474 radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0) | pkt_flags);
2475 radeon_emit(cs, (resource_id_base + i) * 3);
2476 radeon_emit_array(cs, rstate->tex_sampler_words, 3);
2477
2478 if (rstate->border_color_use) {
2479 radeon_set_config_reg_seq(cs, border_index_reg, 5);
2480 radeon_emit(cs, i);
2481 radeon_emit_array(cs, border_color_ptr->ui, 4);
2482 }
2483 }
2484 texinfo->states.dirty_mask = 0;
2485 }
2486
2487 static void evergreen_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2488 {
2489 if (rctx->vs_shader->current->shader.vs_as_ls) {
2490 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 72,
2491 R_00A450_TD_LS_SAMPLER0_BORDER_COLOR_INDEX, 0);
2492 } else {
2493 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18,
2494 R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
2495 }
2496 }
2497
2498 static void evergreen_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2499 {
2500 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36,
2501 R_00A428_TD_GS_SAMPLER0_BORDER_INDEX, 0);
2502 }
2503
2504 static void evergreen_emit_tcs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2505 {
2506 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL], 54,
2507 R_00A43C_TD_HS_SAMPLER0_BORDER_COLOR_INDEX, 0);
2508 }
2509
2510 static void evergreen_emit_tes_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2511 {
2512 if (!rctx->tes_shader)
2513 return;
2514 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL], 18,
2515 R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
2516 }
2517
2518 static void evergreen_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2519 {
2520 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0,
2521 R_00A400_TD_PS_SAMPLER0_BORDER_INDEX, 0);
2522 }
2523
2524 static void evergreen_emit_cs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2525 {
2526 evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE], 90,
2527 R_00A464_TD_CS_SAMPLER0_BORDER_INDEX,
2528 RADEON_CP_PACKET3_COMPUTE_MODE);
2529 }
2530
2531 static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
2532 {
2533 struct r600_sample_mask *s = (struct r600_sample_mask*)a;
2534 uint8_t mask = s->sample_mask;
2535
2536 radeon_set_context_reg(&rctx->b.gfx.cs, R_028C3C_PA_SC_AA_MASK,
2537 mask | (mask << 8) | (mask << 16) | (mask << 24));
2538 }
2539
2540 static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
2541 {
2542 struct r600_sample_mask *s = (struct r600_sample_mask*)a;
2543 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2544 uint16_t mask = s->sample_mask;
2545
2546 radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
2547 radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */
2548 radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */
2549 }
2550
2551 static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
2552 {
2553 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2554 struct r600_cso_state *state = (struct r600_cso_state*)a;
2555 struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
2556
2557 if (!shader)
2558 return;
2559
2560 radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
2561 (shader->buffer->gpu_address + shader->offset) >> 8);
2562 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2563 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
2564 RADEON_USAGE_READ,
2565 RADEON_PRIO_SHADER_BINARY));
2566 }
2567
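/* Program the VGT shader-stage enables. With a GS, the copy shader runs
 * as the VS stage and the real VS moves to the ES stage; with
 * tessellation, the VS runs as LS and the DS feeds either the VS or ES
 * stage depending on whether a GS is active. The GS cut mode comes from
 * the maximum emitted vertex count, and VGT_TF_PARAM (type, partitioning,
 * topology) comes from the TES properties. */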
2568 static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
2569 {
2570 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2571 struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
2572
2573 uint32_t v = 0, v2 = 0, primid = 0, tf_param = 0;
2574
2575 if (rctx->vs_shader->current->shader.vs_as_gs_a) {
2576 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
2577 primid = 1;
2578 }
2579
2580 if (state->geom_enable) {
2581 uint32_t cut_val;
2582
2583 if (rctx->gs_shader->gs_max_out_vertices <= 128)
2584 cut_val = V_028A40_GS_CUT_128;
2585 else if (rctx->gs_shader->gs_max_out_vertices <= 256)
2586 cut_val = V_028A40_GS_CUT_256;
2587 else if (rctx->gs_shader->gs_max_out_vertices <= 512)
2588 cut_val = V_028A40_GS_CUT_512;
2589 else
2590 cut_val = V_028A40_GS_CUT_1024;
2591
2592 v = S_028B54_GS_EN(1) |
2593 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2594 if (!rctx->tes_shader)
2595 v |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
2596
2597 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
2598 S_028A40_CUT_MODE(cut_val);
2599
2600 if (rctx->gs_shader->current->shader.gs_prim_id_input)
2601 primid = 1;
2602 }
2603
2604 if (rctx->tes_shader) {
2605 uint32_t type, partitioning, topology;
2606 struct tgsi_shader_info *info = &rctx->tes_shader->current->selector->info;
2607 unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
2608 unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
2609 bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
2610 bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
2611 switch (tes_prim_mode) {
2612 case PIPE_PRIM_LINES:
2613 type = V_028B6C_TESS_ISOLINE;
2614 break;
2615 case PIPE_PRIM_TRIANGLES:
2616 type = V_028B6C_TESS_TRIANGLE;
2617 break;
2618 case PIPE_PRIM_QUADS:
2619 type = V_028B6C_TESS_QUAD;
2620 break;
2621 default:
2622 assert(0);
2623 return;
2624 }
2625
2626 switch (tes_spacing) {
2627 case PIPE_TESS_SPACING_FRACTIONAL_ODD:
2628 partitioning = V_028B6C_PART_FRAC_ODD;
2629 break;
2630 case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
2631 partitioning = V_028B6C_PART_FRAC_EVEN;
2632 break;
2633 case PIPE_TESS_SPACING_EQUAL:
2634 partitioning = V_028B6C_PART_INTEGER;
2635 break;
2636 default:
2637 assert(0);
2638 return;
2639 }
2640
2641 if (tes_point_mode)
2642 topology = V_028B6C_OUTPUT_POINT;
2643 else if (tes_prim_mode == PIPE_PRIM_LINES)
2644 topology = V_028B6C_OUTPUT_LINE;
2645 else if (tes_vertex_order_cw)
2646 /* XXX follow radeonsi and invert */
2647 topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
2648 else
2649 topology = V_028B6C_OUTPUT_TRIANGLE_CW;
2650
2651 tf_param = S_028B6C_TYPE(type) |
2652 S_028B6C_PARTITIONING(partitioning) |
2653 S_028B6C_TOPOLOGY(topology);
2654 }
2655
2656 if (rctx->tes_shader) {
2657 v |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
2658 S_028B54_HS_EN(1);
2659 if (!state->geom_enable)
2660 v |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
2661 else
2662 v |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
2663 }
2664
2665 	radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, v ? 1 : 0);
2666 radeon_set_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v);
2667 radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
2668 radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
2669 radeon_set_context_reg(cs, R_028B6C_VGT_TF_PARAM, tf_param);
2670 }
2671
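/* (Re)program the ESGS and GSVS ring buffers. The ring switch is
 * bracketed by WAIT_3D_IDLE + VGT_FLUSH pairs so in-flight geometry work
 * cannot observe the base/size change. */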
2672 static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
2673 {
2674 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2675 struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
2676 struct r600_resource *rbuffer;
2677
2678 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
2679 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2680 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
2681
2682 if (state->enable) {
2683 		rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
2684 radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
2685 rbuffer->gpu_address >> 8);
2686 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2687 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2688 RADEON_USAGE_READWRITE,
2689 RADEON_PRIO_SHADER_RINGS));
2690 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
2691 state->esgs_ring.buffer_size >> 8);
2692
2693 		rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
2694 radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
2695 rbuffer->gpu_address >> 8);
2696 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2697 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2698 RADEON_USAGE_READWRITE,
2699 RADEON_PRIO_SHADER_RINGS));
2700 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
2701 state->gsvs_ring.buffer_size >> 8);
2702 } else {
2703 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
2704 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
2705 }
2706
2707 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
2708 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2709 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
2710 }
2711
2712 void cayman_init_common_regs(struct r600_command_buffer *cb,
2713 enum chip_class ctx_chip_class,
2714 enum radeon_family ctx_family,
2715 int ctx_drm_minor)
2716 {
2717 r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 2);
2718 r600_store_value(cb, S_008C00_EXPORT_SRC_C(1)); /* R_008C00_SQ_CONFIG */
2719 /* always set the temp clauses */
2720 r600_store_value(cb, S_008C04_NUM_CLAUSE_TEMP_GPRS(4)); /* R_008C04_SQ_GPR_RESOURCE_MGMT_1 */
2721
2722 r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
2723 r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
2724 r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */
2725
2726 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));
2727
2728 r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
2729 r600_store_value(cb, 0);
2730 r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));
2731
2732 r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
2733 }
2734
2735 static void cayman_init_atom_start_cs(struct r600_context *rctx)
2736 {
2737 struct r600_command_buffer *cb = &rctx->start_cs_cmd;
2738 int i;
2739
2740 r600_init_command_buffer(cb, 338);
2741
2742 /* This must be first. */
2743 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
2744 r600_store_value(cb, 0x80000000);
2745 r600_store_value(cb, 0x80000000);
2746
2747 /* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	/* This enables pipeline stat & streamout queries.
	 * They are only disabled by blits.
	 */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));

	cayman_init_common_regs(cb, rctx->b.chip_class,
				rctx->b.family, rctx->screen->b.info.drm_minor);

	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));

	/* remove LS/HS from one SIMD for hw workaround */
	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xfffffffe);

	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */

	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, fui(0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);

	r600_store_context_reg_seq(cb, CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
	r600_store_value(cb, 0x76543210); /* CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0 */
	r600_store_value(cb, 0xfedcba98); /* CM_R_028BD8_PA_SC_CENTROID_PRIORITY_1 */

	r600_store_context_reg(cb, R_028724_GDS_ADDR_SIZE, 0x3fff);
	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);

	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));

	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);

	/* Avoid the GPU preloading constants from a random address. */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
	r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
	r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
	r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
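	/* Default SQ loop constants, one per shader stage; the per-stage
	 * loop-const banks appear to be 32 dwords apart, and 0x01000FFF
	 * encodes an increment of 1 with a maximum trip count of 0xFFF.
	 */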
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}

void evergreen_init_common_regs(struct r600_context *rctx, struct r600_command_buffer *cb,
				enum chip_class ctx_chip_class,
				enum radeon_family ctx_family,
				int ctx_drm_minor)
{
	int ps_prio;
	int vs_prio;
	int gs_prio;
	int es_prio;

	int hs_prio;
	int cs_prio;
	int ls_prio;

	unsigned tmp;

	ps_prio = 0;
	vs_prio = 1;
	gs_prio = 2;
	es_prio = 3;
	hs_prio = 3;
	ls_prio = 3;
	cs_prio = 0;

	rctx->default_gprs[R600_HW_STAGE_PS] = 93;
	rctx->default_gprs[R600_HW_STAGE_VS] = 46;
	rctx->r6xx_num_clause_temp_gprs = 4;
	rctx->default_gprs[R600_HW_STAGE_GS] = 31;
	rctx->default_gprs[R600_HW_STAGE_ES] = 31;
	rctx->default_gprs[EG_HW_STAGE_HS] = 23;
	rctx->default_gprs[EG_HW_STAGE_LS] = 23;

	tmp = 0;
	switch (ctx_family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		break;
	default:
		tmp |= S_008C00_VC_ENABLE(1);
		break;
	}
	tmp |= S_008C00_EXPORT_SRC_C(1);
	tmp |= S_008C00_CS_PRIO(cs_prio);
	tmp |= S_008C00_LS_PRIO(ls_prio);
	tmp |= S_008C00_HS_PRIO(hs_prio);
	tmp |= S_008C00_PS_PRIO(ps_prio);
	tmp |= S_008C00_VS_PRIO(vs_prio);
	tmp |= S_008C00_GS_PRIO(gs_prio);
	tmp |= S_008C00_ES_PRIO(es_prio);

	r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 1);
	r600_store_value(cb, tmp); /* R_008C00_SQ_CONFIG */

	r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
	r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
	r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */

	/* The cs checker requires this register to be set. */
	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);

	r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
	r600_store_value(cb, 0);
	r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));
}

void evergreen_init_atom_start_cs(struct r600_context *rctx)
{
	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
	int num_ps_threads;
	int num_vs_threads;
	int num_gs_threads;
	int num_es_threads;
	int num_hs_threads;
	int num_ls_threads;

	int num_ps_stack_entries;
	int num_vs_stack_entries;
	int num_gs_stack_entries;
	int num_es_stack_entries;
	int num_hs_stack_entries;
	int num_ls_stack_entries;
	enum radeon_family family;
	unsigned tmp, i;

	if (rctx->b.chip_class == CAYMAN) {
		cayman_init_atom_start_cs(rctx);
		return;
	}

	r600_init_command_buffer(cb, 338);

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	/* This enables pipeline stat & streamout queries.
	 * They are only disabled by blits.
	 */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));

	evergreen_init_common_regs(rctx, cb, rctx->b.chip_class,
				   rctx->b.family, rctx->screen->b.info.drm_minor);

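	/* Partition the SQ thread and stack resources between the shader
	 * stages. The totals below are per-family tuning values, presumably
	 * tied to each ASIC's SIMD count and memory configuration.
	 */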
	family = rctx->b.family;
	switch (family) {
	case CHIP_CEDAR:
	default:
		num_ps_threads = 96;
		num_vs_threads = 16;
		num_gs_threads = 16;
		num_es_threads = 16;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_REDWOOD:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_JUNIPER:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_PALM:
		num_ps_threads = 96;
		num_vs_threads = 16;
		num_gs_threads = 16;
		num_es_threads = 16;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_SUMO:
		num_ps_threads = 96;
		num_vs_threads = 25;
		num_gs_threads = 25;
		num_es_threads = 25;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_SUMO2:
		num_ps_threads = 96;
		num_vs_threads = 25;
		num_gs_threads = 25;
		num_es_threads = 25;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_BARTS:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_TURKS:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_CAICOS:
		num_ps_threads = 96;
		num_vs_threads = 10;
		num_gs_threads = 10;
		num_es_threads = 10;
		num_hs_threads = 10;
		num_ls_threads = 10;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	}

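	/* Pack the per-stage thread counts and stack entries into the five
	 * consecutive SQ_THREAD/STACK_RESOURCE_MGMT registers.
	 */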
	tmp = S_008C18_NUM_PS_THREADS(num_ps_threads);
	tmp |= S_008C18_NUM_VS_THREADS(num_vs_threads);
	tmp |= S_008C18_NUM_GS_THREADS(num_gs_threads);
	tmp |= S_008C18_NUM_ES_THREADS(num_es_threads);

	r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
	r600_store_value(cb, tmp); /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1 */

	tmp = S_008C1C_NUM_HS_THREADS(num_hs_threads);
	tmp |= S_008C1C_NUM_LS_THREADS(num_ls_threads);
	r600_store_value(cb, tmp); /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2 */

	tmp = S_008C20_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
	tmp |= S_008C20_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
	r600_store_value(cb, tmp); /* R_008C20_SQ_STACK_RESOURCE_MGMT_1 */

	tmp = S_008C24_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
	tmp |= S_008C24_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
	r600_store_value(cb, tmp); /* R_008C24_SQ_STACK_RESOURCE_MGMT_2 */

	tmp = S_008C28_NUM_HS_STACK_ENTRIES(num_hs_stack_entries);
	tmp |= S_008C28_NUM_LS_STACK_ENTRIES(num_ls_stack_entries);
	r600_store_value(cb, tmp); /* R_008C28_SQ_STACK_RESOURCE_MGMT_3 */

	r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			      S_008E2C_NUM_PS_LDS(0x1000) | S_008E2C_NUM_LS_LDS(0x1000));

	/* remove LS/HS from one SIMD for hw workaround */
	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xfffffffe);

	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));

	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */

	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, fui(1.0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */

	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);

	r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);

	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);
	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);
	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));

	/* Avoid the GPU preloading constants from a random address. */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	if (rctx->b.family == CHIP_CAICOS) {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
	} else {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 7);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_value(cb, 0); /* R028B5C_VGT_LS_SIZE */
		r600_store_value(cb, 0); /* R028B60_VGT_HS_SIZE */
		r600_store_value(cb, 0); /* R028B64_VGT_LS_HS_ALLOC */
		r600_store_value(cb, 0); /* R028B68_VGT_HS_PATCH_CONST */
		r600_store_value(cb, 0); /* R028B6C_VGT_TF_PARAM */
	}

	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}

void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control = 0;
	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
	int ninterp = 0;
	boolean have_perspective = FALSE, have_linear = FALSE;
	static const unsigned spi_baryc_enable_bit[6] = {
		S_0286E0_PERSP_SAMPLE_ENA(1),
		S_0286E0_PERSP_CENTER_ENA(1),
		S_0286E0_PERSP_CENTROID_ENA(1),
		S_0286E0_LINEAR_SAMPLE_ENA(1),
		S_0286E0_LINEAR_CENTER_ENA(1),
		S_0286E0_LINEAR_CENTROID_ENA(1)
	};
	unsigned spi_baryc_cntl = 0, sid, tmp, num = 0;
	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;
	uint32_t spi_ps_input_cntl[32];

	if (!cb->buf) {
		r600_init_command_buffer(cb, 64);
	} else {
		cb->num_dw = 0;
	}

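	/* Classify the PS inputs: position, face, sample mask and sample id
	 * live in dedicated registers; everything else is interpolated and
	 * gets a SPI_PS_INPUT_CNTL entry.
	 */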
	for (i = 0; i < rshader->ninput; i++) {
		/* Evergreen NUM_INTERP only counts values interpolated into the
		 * LDS; POSITION arrives via GPRs from the SC, so it isn't counted.
		 */
		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
			pos_index = i;
		else if (rshader->input[i].name == TGSI_SEMANTIC_FACE) {
			if (face_index == -1)
				face_index = i;
		}
		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
			if (face_index == -1)
				face_index = i; /* lives in same register, same enable bit */
		}
		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) {
			fixed_pt_position_index = i;
		}
		else {
			ninterp++;
			int k = eg_get_interpolator_index(
				rshader->input[i].interpolate,
				rshader->input[i].interpolate_location);
			if (k >= 0) {
				spi_baryc_cntl |= spi_baryc_enable_bit[k];
				have_perspective |= k < 3;
				have_linear |= !(k < 3);
				if (rshader->input[i].uses_interpolate_at_centroid) {
					k = eg_get_interpolator_index(
						rshader->input[i].interpolate,
						TGSI_INTERPOLATE_LOC_CENTROID);
					spi_baryc_cntl |= spi_baryc_enable_bit[k];
				}
			}
		}

		sid = rshader->input[i].spi_sid;

		if (sid) {
			tmp = S_028644_SEMANTIC(sid);

			/* D3D 9 behaviour. GL is undefined */
			if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
				tmp |= S_028644_DEFAULT_VAL(3);

			if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
			    rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
			    (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
			     rctx->rasterizer && rctx->rasterizer->flatshade)) {
				tmp |= S_028644_FLAT_SHADE(1);
			}

			if (rshader->input[i].name == TGSI_SEMANTIC_PCOORD ||
			    (rshader->input[i].name == TGSI_SEMANTIC_TEXCOORD &&
			     (sprite_coord_enable & (1 << rshader->input[i].sid)))) {
				tmp |= S_028644_PT_SPRITE_TEX(1);
			}

			spi_ps_input_cntl[num++] = tmp;
		}
	}

	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, num);
	r600_store_array(cb, num, spi_ps_input_cntl);

	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
			z_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
			stencil_export = 1;
		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
		    rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
			mask_export = 1;
	}
	if (rshader->uses_kill)
		db_shader_control |= S_02880C_KILL_ENABLE(1);

	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
	db_shader_control |= S_02880C_STENCIL_EXPORT_ENABLE(stencil_export);
	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);

	if (shader->selector->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
		db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
				     S_02880C_EXEC_ON_NOOP(shader->selector->info.writes_memory);
	} else if (shader->selector->info.writes_memory) {
		db_shader_control |= S_02880C_EXEC_ON_HIER_FAIL(1);
	}

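	/* Translate the TGSI depth-layout property into the hardware's
	 * conservative Z export mode.
	 */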
	switch (rshader->ps_conservative_z) {
	default: /* fall through */
	case TGSI_FS_DEPTH_LAYOUT_ANY:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_ANY_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_GREATER:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_LESS:
		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
		break;
	}

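	/* Build SQ_PGM_EXPORTS_PS: the low bit appears to flag a depth-side
	 * (Z/stencil/sample-mask) export, while EXPORT_COLORS holds the color
	 * export count; the fallback value 2 corresponds to one color export.
	 */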
	exports_ps = 0;
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK)
			exports_ps |= 1;
	}

	num_cout = rshader->ps_export_highest + 1;

	exports_ps |= S_02884C_EXPORT_COLORS(num_cout);
	if (!exports_ps) {
		/* always at least export 1 component per pixel */
		exports_ps = 2;
	}
	shader->nr_ps_color_outputs = num_cout;
	shader->ps_color_export_mask = rshader->ps_color_export_mask;
	if (ninterp == 0) {
		ninterp = 1;
		have_perspective = TRUE;
	}
	if (!spi_baryc_cntl)
		spi_baryc_cntl |= spi_baryc_enable_bit[0];

	if (!have_perspective && !have_linear)
		have_perspective = TRUE;

	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(ninterp) |
			      S_0286CC_PERSP_GRADIENT_ENA(have_perspective) |
			      S_0286CC_LINEAR_GRADIENT_ENA(have_linear);
	spi_input_z = 0;
	if (pos_index != -1) {
		spi_ps_in_control_0 |= S_0286CC_POSITION_ENA(1) |
				       S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
				       S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr);
		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
	}

	spi_ps_in_control_1 = 0;
	if (face_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
				       S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
	}
	if (fixed_pt_position_index != -1) {
		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
				       S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
	}

	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */

	r600_store_context_reg(cb, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);
	r600_store_context_reg(cb, R_02884C_SQ_PGM_EXPORTS_PS, exports_ps);

	r600_store_context_reg_seq(cb, R_028840_SQ_PGM_START_PS, 2);
	r600_store_value(cb, shader->bo->gpu_address >> 8);
	r600_store_value(cb, /* R_028844_SQ_PGM_RESOURCES_PS */
			 S_028844_NUM_GPRS(rshader->bc.ngpr) |
			 S_028844_PRIME_CACHE_ON_DRAW(1) |
			 S_028844_DX10_CLAMP(1) |
			 S_028844_STACK_SIZE(rshader->bc.nstack));
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->db_shader_control = db_shader_control;
	shader->ps_depth_export = z_export | stencil_export | mask_export;

	shader->sprite_coord_enable = sprite_coord_enable;
	if (rctx->rasterizer)
		shader->flatshade = rctx->rasterizer->flatshade;
}

void evergreen_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
			       S_028890_DX10_CLAMP(1) |
			       S_028890_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_02888C_SQ_PGM_START_ES,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void evergreen_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
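	/* Per-stream GSVS ring item sizes in dwords (ring_item_sizes[] is in
	 * bytes, hence the >> 2), scaled by the maximum number of vertices
	 * the GS can emit.
	 */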
	unsigned gsvs_itemsizes[4] = {
		(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[1] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[2] * shader->selector->gs_max_out_vertices) >> 2,
		(cp_shader->ring_item_sizes[3] * shader->selector->gs_max_out_vertices) >> 2
	};

	r600_init_command_buffer(cb, 64);

	/* VGT_GS_MODE is written by evergreen_emit_shader_stages */

	r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
			       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));

	if (rctx->screen->b.info.drm_minor >= 35) {
		r600_store_context_reg(cb, R_028B90_VGT_GS_INSTANCE_CNT,
				       S_028B90_CNT(MIN2(shader->selector->gs_num_invocations, 127)) |
				       S_028B90_ENABLE(shader->selector->gs_num_invocations > 0));
	}
	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, cp_shader->ring_item_sizes[0] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[1] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[2] >> 2);
	r600_store_value(cb, cp_shader->ring_item_sizes[3] >> 2);

	r600_store_context_reg(cb, R_028900_SQ_ESGS_RING_ITEMSIZE,
			       (rshader->ring_item_sizes[0]) >> 2);

	r600_store_context_reg(cb, R_028904_SQ_GSVS_RING_ITEMSIZE,
			       gsvs_itemsizes[0] +
			       gsvs_itemsizes[1] +
			       gsvs_itemsizes[2] +
			       gsvs_itemsizes[3]);

	r600_store_context_reg_seq(cb, R_02892C_SQ_GSVS_RING_OFFSET_1, 3);
	r600_store_value(cb, gsvs_itemsizes[0]);
	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1]);
	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1] + gsvs_itemsizes[2]);

	/* FIXME calculate these values somehow ??? */
	r600_store_context_reg_seq(cb, R_028A54_GS_PER_ES, 3);
	r600_store_value(cb, 0x80); /* GS_PER_ES */
	r600_store_value(cb, 0x100); /* ES_PER_GS */
	r600_store_value(cb, 0x2); /* GS_PER_VS */

	r600_store_context_reg(cb, R_028878_SQ_PGM_RESOURCES_GS,
			       S_028878_NUM_GPRS(rshader->bc.ngpr) |
			       S_028878_DX10_CLAMP(1) |
			       S_028878_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_028874_SQ_PGM_START_GS,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}

void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned spi_vs_out_id[10] = {};
	unsigned i, tmp, nparams = 0;

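	/* Pack the semantic ids of the exported parameters four to a
	 * register, one byte each, into SPI_VS_OUT_ID_0..9.
	 */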
	for (i = 0; i < rshader->noutput; i++) {
		if (rshader->output[i].spi_sid) {
			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
			spi_vs_out_id[nparams / 4] |= tmp;
			nparams++;
		}
	}

	r600_init_command_buffer(cb, 32);

	r600_store_context_reg_seq(cb, R_02861C_SPI_VS_OUT_ID_0, 10);
	for (i = 0; i < 10; i++) {
		r600_store_value(cb, spi_vs_out_id[i]);
	}

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	if (nparams < 1)
		nparams = 1;

	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
	r600_store_context_reg(cb, R_028860_SQ_PGM_RESOURCES_VS,
			       S_028860_NUM_GPRS(rshader->bc.ngpr) |
			       S_028860_DX10_CLAMP(1) |
			       S_028860_STACK_SIZE(rshader->bc.nstack));
	if (rshader->vs_position_window_space) {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	} else {
		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
				       S_028818_VTX_W0_FMT(1) |
				       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
				       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
				       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	}
	r600_store_context_reg(cb, R_02885C_SQ_PGM_START_VS,
			       shader->bo->gpu_address >> 8);
	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */

	shader->pa_cl_vs_out_cntl =
		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->cc_dist_mask & 0x0F) != 0) |
		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->cc_dist_mask & 0xF0) != 0) |
		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport) |
		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer);
}

void evergreen_update_hs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);
	r600_store_context_reg(cb, R_0288BC_SQ_PGM_RESOURCES_HS,
			       S_0288BC_NUM_GPRS(rshader->bc.ngpr) |
			       S_0288BC_DX10_CLAMP(1) |
			       S_0288BC_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_0288B8_SQ_PGM_START_HS,
			       shader->bo->gpu_address >> 8);
}

void evergreen_update_ls_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;

	r600_init_command_buffer(cb, 32);
	r600_store_context_reg(cb, R_0288D4_SQ_PGM_RESOURCES_LS,
			       S_0288D4_NUM_GPRS(rshader->bc.ngpr) |
			       S_0288D4_DX10_CLAMP(1) |
			       S_0288D4_STACK_SIZE(rshader->bc.nstack));
	r600_store_context_reg(cb, R_0288D0_SQ_PGM_START_LS,
			       shader->bo->gpu_address >> 8);
}

void *evergreen_create_resolve_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, V_028808_CB_RESOLVE);
}

void *evergreen_create_decompress_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned mode = rctx->screen->has_compressed_msaa_texturing ?
			V_028808_CB_FMASK_DECOMPRESS : V_028808_CB_DECOMPRESS;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
}

void *evergreen_create_fastclear_blend(struct r600_context *rctx)
{
	struct pipe_blend_state blend;
	unsigned mode = V_028808_CB_ELIMINATE_FAST_CLEAR;

	memset(&blend, 0, sizeof(blend));
	blend.independent_blend_enable = true;
	blend.rt[0].colormask = 0xf;
	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
}

void *evergreen_create_db_flush_dsa(struct r600_context *rctx)
{
	struct pipe_depth_stencil_alpha_state dsa = {{{0}}};

	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
}

void evergreen_update_db_shader_control(struct r600_context * rctx)
{
	bool dual_export;
	unsigned db_shader_control;

	if (!rctx->ps_shader) {
		return;
	}

	dual_export = rctx->framebuffer.export_16bpc &&
		      !rctx->ps_shader->current->ps_depth_export;

	db_shader_control = rctx->ps_shader->current->db_shader_control |
			    S_02880C_DUAL_EXPORT_ENABLE(dual_export) |
			    S_02880C_DB_SOURCE_FORMAT(dual_export ? V_02880C_EXPORT_DB_TWO :
						      V_02880C_EXPORT_DB_FULL) |
			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->framebuffer.cb0_is_integer);

	/* When alpha test is enabled we can't trust the hw to make the proper
	 * decision on the order in which ztest should be run related to fragment
	 * shader execution.
	 *
	 * If alpha test is enabled perform early z rejection (RE_Z) but don't early
	 * write to the zbuffer. Write to zbuffer is delayed after fragment shader
	 * execution and thus after alpha test so if discarded by the alpha test
	 * the z value is not written.
	 * If ReZ is enabled, and the zfunc/zenable/zwrite values change you can
	 * get a hang unless you flush the DB in between. For now just use
	 * LATE_Z.
	 */
	if (rctx->alphatest_state.sx_alpha_test_control || rctx->ps_shader->info.writes_memory) {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
	} else {
		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}

	if (db_shader_control != rctx->db_misc_state.db_shader_control) {
		rctx->db_misc_state.db_shader_control = db_shader_control;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

static void evergreen_dma_copy_tile(struct r600_context *rctx,
				    struct pipe_resource *dst,
				    unsigned dst_level,
				    unsigned dst_x,
				    unsigned dst_y,
				    unsigned dst_z,
				    struct pipe_resource *src,
				    unsigned src_level,
				    unsigned src_x,
				    unsigned src_y,
				    unsigned src_z,
				    unsigned copy_height,
				    unsigned pitch,
				    unsigned bpp)
{
	struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
	unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode;
	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, non_disp_tiling = 0;
	uint64_t base, addr;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
	assert(dst_mode != src_mode);

	/* non_disp_tiling bit needs to be set for depth, stencil, and fmask surfaces */
	if (util_format_has_depth(util_format_description(src->format)))
		non_disp_tiling = 1;

	y = 0;
	sub_cmd = EG_DMA_COPY_TILED;
	lbpp = util_logbase2(bpp);
	pitch_tile_max = ((pitch / bpp) / 8) - 1;
	nbanks = eg_num_banks(rctx->screen->b.info.r600_num_banks);

	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
		/* T2L */
		array_mode = evergreen_array_mode(src_mode);
		slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* The linear height must match the slice tile max height; this is
		 * fine even if the linear destination/source has a smaller height,
		 * since the DMA packet uses copy_height, which is always less than
		 * or equal to the linear height.
		 */
		height = u_minify(rsrc->resource.b.b.height0, src_level);
		detile = 1;
		x = src_x;
		y = src_y;
		z = src_z;
		base = (uint64_t)rsrc->surface.u.legacy.level[src_level].offset_256B * 256;
		addr = (uint64_t)rdst->surface.u.legacy.level[dst_level].offset_256B * 256;
		addr += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		addr += dst_y * pitch + dst_x * bpp;
		bank_h = eg_bank_wh(rsrc->surface.u.legacy.bankh);
		bank_w = eg_bank_wh(rsrc->surface.u.legacy.bankw);
		mt_aspect = eg_macro_tile_aspect(rsrc->surface.u.legacy.mtilea);
		tile_split = eg_tile_split(rsrc->surface.u.legacy.tile_split);
		base += rsrc->resource.gpu_address;
		addr += rdst->resource.gpu_address;
	} else {
		/* L2T */
		array_mode = evergreen_array_mode(dst_mode);
		slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* The linear height must match the slice tile max height; this is
		 * fine even if the linear destination/source has a smaller height,
		 * since the DMA packet uses copy_height, which is always less than
		 * or equal to the linear height.
		 */
		height = u_minify(rdst->resource.b.b.height0, dst_level);
		detile = 0;
		x = dst_x;
		y = dst_y;
		z = dst_z;
		base = (uint64_t)rdst->surface.u.legacy.level[dst_level].offset_256B * 256;
		addr = (uint64_t)rsrc->surface.u.legacy.level[src_level].offset_256B * 256;
		addr += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_z;
		addr += src_y * pitch + src_x * bpp;
		bank_h = eg_bank_wh(rdst->surface.u.legacy.bankh);
		bank_w = eg_bank_wh(rdst->surface.u.legacy.bankw);
		mt_aspect = eg_macro_tile_aspect(rdst->surface.u.legacy.mtilea);
		tile_split = eg_tile_split(rdst->surface.u.legacy.tile_split);
		base += rdst->resource.gpu_address;
		addr += rsrc->resource.gpu_address;
	}

	size = (copy_height * pitch) / 4;
	ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);
	r600_need_dma_space(&rctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);

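	/* Each EG_DMA_COPY_TILED packet below is 9 dwords (hence ncopy * 9
	 * above): header, tiled base, tiling info, pitch/height, slice,
	 * x/z, y plus bank/tiling bits, then the low and high linear address.
	 */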
	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (((cheight * pitch) / 4) > EG_DMA_COPY_MAX_SIZE) {
			cheight = (EG_DMA_COPY_MAX_SIZE * 4) / pitch;
		}
		size = (cheight * pitch) / 4;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
					  RADEON_USAGE_READ, 0);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
					  RADEON_USAGE_WRITE, 0);
		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size));
		radeon_emit(cs, base >> 8);
		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
				(lbpp << 24) | (bank_h << 21) |
				(bank_w << 18) | (mt_aspect << 16));
		radeon_emit(cs, (pitch_tile_max << 0) | ((height - 1) << 16));
		radeon_emit(cs, (slice_tile_max << 0));
		radeon_emit(cs, (x << 0) | (z << 18));
		radeon_emit(cs, (y << 0) | (tile_split << 21) | (nbanks << 25) | (non_disp_tiling << 28));
		radeon_emit(cs, addr & 0xfffffffc);
		radeon_emit(cs, (addr >> 32UL) & 0xff);
		copy_height -= cheight;
		addr += cheight * pitch;
		y += cheight;
	}
}

static void evergreen_dma_copy(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       unsigned dst_level,
			       unsigned dstx, unsigned dsty, unsigned dstz,
			       struct pipe_resource *src,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (rctx->b.dma.cs.priv == NULL) {
		goto fallback;
	}

	if (rctx->cmd_buf_is_compute) {
		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->cmd_buf_is_compute = false;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	if (src_box->depth > 1 ||
	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
				       dstz, rsrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

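	/* Pitches below are in bytes: blocks per row times bytes per element. */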
	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
	copy_height = src_box->height / rsrc->surface.blk_h;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
		/* FIXME evergreen can do partial blit */
		goto fallback;
	}
	/* The x tests here are currently useless (because we don't support
	 * partial blits), but keep them around so we don't forget about them.
	 */
	if (src_pitch % 8 || src_box->x % 8 || dst_x % 8 || src_box->y % 8 || dst_y % 8) {
		goto fallback;
	}

	/* 128 bpp surfaces require non_disp_tiling for both
	 * tiled and linear buffers on cayman. However, async
	 * DMA only supports it on the tiled side. As such
	 * the tile order is backwards after a L2T/T2L packet.
	 */
	if ((rctx->b.chip_class == CAYMAN) &&
	    (src_mode != dst_mode) &&
	    (util_format_get_blocksize(src->format) >= 16)) {
		goto fallback;
	}

	if (src_mode == dst_mode) {
		uint64_t dst_offset, src_offset;
		/* A simple DMA blit will do. NOTE: the code here assumes
		 * src_box->x/y == 0, dst_x/y == 0 and dst_pitch == src_pitch.
		 */
		src_offset = (uint64_t)rsrc->surface.u.legacy.level[src_level].offset_256B * 256;
		src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
		src_offset += src_y * src_pitch + src_x * bpp;
		dst_offset = (uint64_t)rdst->surface.u.legacy.level[dst_level].offset_256B * 256;
		dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		dst_offset += dst_y * dst_pitch + dst_x * bpp;
		evergreen_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset,
					  src_box->height * src_pitch);
	} else {
		evergreen_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
					src, src_level, src_x, src_y, src_box->z,
					copy_height, dst_pitch, bpp);
	}
	return;

fallback:
	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box);
}

static void evergreen_set_tess_state(struct pipe_context *ctx,
				     const float default_outer_level[4],
				     const float default_inner_level[2])
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	memcpy(rctx->tess_state, default_outer_level, sizeof(float) * 4);
	memcpy(rctx->tess_state+4, default_inner_level, sizeof(float) * 2);
	rctx->driver_consts[PIPE_SHADER_TESS_CTRL].tcs_default_levels_dirty = true;
}

static void evergreen_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->patch_vertices = patch_vertices;
}

static void evergreen_setup_immed_buffer(struct r600_context *rctx,
					 struct r600_image_view *rview,
					 enum pipe_format pformat)
{
	struct r600_screen *rscreen = (struct r600_screen *)rctx->b.b.screen;
	uint32_t immed_size = rscreen->b.info.max_se * 256 * 64 * util_format_get_blocksize(pformat);
	struct eg_buf_res_params buf_params;
	bool skip_reloc = false;
	struct r600_resource *resource = (struct r600_resource *)rview->base.resource;
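	/* Lazily allocate the per-resource "immediate" scratch buffer that
	 * the image/RAT paths use; its size scales with the number of shader
	 * engines (max_se) and the format's block size.
	 */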
	if (!resource->immed_buffer) {
		eg_resource_alloc_immed(&rscreen->b, resource, immed_size);
	}

	memset(&buf_params, 0, sizeof(buf_params));
	buf_params.pipe_format = pformat;
	buf_params.size = resource->immed_buffer->b.b.width0;
	buf_params.swizzle[0] = PIPE_SWIZZLE_X;
	buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
	buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
	buf_params.swizzle[3] = PIPE_SWIZZLE_W;
	buf_params.uncached = 1;
	evergreen_fill_buffer_resource_words(rctx, &resource->immed_buffer->b.b,
					     &buf_params, &skip_reloc,
					     rview->immed_resource_words);
}

static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx,
					    unsigned start_slot,
					    unsigned count,
					    const struct pipe_shader_buffer *buffers)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_atomic_buffer_state *astate;
	unsigned i, idx;

	astate = &rctx->atomic_buffer_state;

	/* We'd probably like to expand this to 8 slots later, so the loop
	 * logic is already in place. */
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		const struct pipe_shader_buffer *buf;
		struct pipe_shader_buffer *abuf;

		abuf = &astate->buffer[i];

		if (!buffers || !buffers[idx].buffer) {
			pipe_resource_reference(&abuf->buffer, NULL);
			continue;
		}
		buf = &buffers[idx];

		pipe_resource_reference(&abuf->buffer, buf->buffer);
		abuf->buffer_offset = buf->buffer_offset;
		abuf->buffer_size = buf->buffer_size;
	}
}

static void evergreen_set_shader_buffers(struct pipe_context *ctx,
					 enum pipe_shader_type shader, unsigned start_slot,
					 unsigned count,
					 const struct pipe_shader_buffer *buffers,
					 unsigned writable_bitmask)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_image_state *istate = NULL;
	struct r600_image_view *rview;
	struct r600_tex_color_info color;
	struct eg_buf_res_params buf_params;
	struct r600_resource *resource;
	unsigned i, idx;
	unsigned old_mask;

	if (shader != PIPE_SHADER_FRAGMENT &&
	    shader != PIPE_SHADER_COMPUTE && count == 0)
		return;

	if (shader == PIPE_SHADER_FRAGMENT)
		istate = &rctx->fragment_buffers;
	else if (shader == PIPE_SHADER_COMPUTE)
		istate = &rctx->compute_buffers;

	old_mask = istate->enabled_mask;
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		const struct pipe_shader_buffer *buf;
		unsigned res_type;

		rview = &istate->views[i];

		if (!buffers || !buffers[idx].buffer) {
			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
			istate->enabled_mask &= ~(1 << i);
			continue;
		}

		buf = &buffers[idx];
		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, buf->buffer);

		resource = (struct r600_resource *)rview->base.resource;

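		/* Bind the buffer as an R32_UINT RAT: set up both the color
		 * surface state (used by the CB-based RAT path) and the
		 * buffer resource words (used for typed reads).
		 */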
		evergreen_setup_immed_buffer(rctx, rview, PIPE_FORMAT_R32_UINT);

		color.offset = 0;
		color.view = 0;
		evergreen_set_color_surface_buffer(rctx, resource,
						   PIPE_FORMAT_R32_UINT,
						   buf->buffer_offset,
						   buf->buffer_offset + buf->buffer_size,
						   &color);

		res_type = V_028C70_BUFFER;

		rview->cb_color_base = color.offset;
		rview->cb_color_dim = color.dim;
		rview->cb_color_info = color.info |
				       S_028C70_RAT(1) |
				       S_028C70_RESOURCE_TYPE(res_type);
		rview->cb_color_pitch = color.pitch;
		rview->cb_color_slice = color.slice;
		rview->cb_color_view = color.view;
		rview->cb_color_attrib = color.attrib;
		rview->cb_color_fmask = color.fmask;
		rview->cb_color_fmask_slice = color.fmask_slice;

		memset(&buf_params, 0, sizeof(buf_params));
		buf_params.pipe_format = PIPE_FORMAT_R32_UINT;
		buf_params.offset = buf->buffer_offset;
		buf_params.size = buf->buffer_size;
		buf_params.swizzle[0] = PIPE_SWIZZLE_X;
		buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
		buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
		buf_params.swizzle[3] = PIPE_SWIZZLE_W;
		buf_params.force_swizzle = true;
		buf_params.uncached = 1;
		buf_params.size_in_bytes = true;
		evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
						     &buf_params,
						     &rview->skip_mip_address_reloc,
						     rview->resource_words);

		istate->enabled_mask |= (1 << i);
	}

	istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;

	if (old_mask != istate->enabled_mask)
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	/* construct the target mask */
	if (rctx->cb_misc_state.buffer_rat_enabled_mask != istate->enabled_mask) {
		rctx->cb_misc_state.buffer_rat_enabled_mask = istate->enabled_mask;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (shader == PIPE_SHADER_FRAGMENT)
		r600_mark_atom_dirty(rctx, &istate->atom);
}

static void evergreen_set_shader_images(struct pipe_context *ctx,
					enum pipe_shader_type shader, unsigned start_slot,
					unsigned count, unsigned unbind_num_trailing_slots,
					const struct pipe_image_view *images)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned i;
	struct r600_image_view *rview;
	struct pipe_resource *image;
	struct r600_resource *resource;
	struct r600_tex_color_info color;
	struct eg_buf_res_params buf_params;
	struct eg_tex_res_params tex_params;
	unsigned old_mask;
	struct r600_image_state *istate = NULL;
	int idx;

	if (shader != PIPE_SHADER_FRAGMENT && shader != PIPE_SHADER_COMPUTE)
		return;
	if (!count && !unbind_num_trailing_slots)
		return;

	if (shader == PIPE_SHADER_FRAGMENT)
		istate = &rctx->fragment_images;
	else if (shader == PIPE_SHADER_COMPUTE)
		istate = &rctx->compute_images;

	assert(shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE);

	old_mask = istate->enabled_mask;
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		unsigned res_type;
		const struct pipe_image_view *iview;

		rview = &istate->views[i];

		if (!images || !images[idx].resource) {
			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
			istate->enabled_mask &= ~(1 << i);
			istate->compressed_colortex_mask &= ~(1 << i);
			istate->compressed_depthtex_mask &= ~(1 << i);
			continue;
		}

		iview = &images[idx];
		image = iview->resource;
		resource = (struct r600_resource *)image;

		r600_context_add_resource_size(ctx, image);

		rview->base = *iview;
		rview->base.resource = NULL;
		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, image);

		evergreen_setup_immed_buffer(rctx, rview, iview->format);

		bool is_buffer = image->target == PIPE_BUFFER;
		struct r600_texture *rtex = (struct r600_texture *)image;
4219 if (!is_buffer && rtex->db_compatible)
4220 istate->compressed_depthtex_mask |= 1 << i;
4221 else
4222 istate->compressed_depthtex_mask &= ~(1 << i);
4223
4224 if (!is_buffer && rtex->cmask.size)
4225 istate->compressed_colortex_mask |= 1 << i;
4226 else
4227 istate->compressed_colortex_mask &= ~(1 << i);
4228 if (!is_buffer) {
4229
4230 evergreen_set_color_surface_common(rctx, rtex,
4231 iview->u.tex.level,
4232 iview->u.tex.first_layer,
4233 iview->u.tex.last_layer,
4234 iview->format,
4235 &color);
4236 color.dim = S_028C78_WIDTH_MAX(u_minify(image->width0, iview->u.tex.level) - 1) |
4237 S_028C78_HEIGHT_MAX(u_minify(image->height0, iview->u.tex.level) - 1);
4238 } else {
4239 color.offset = 0;
4240 color.view = 0;
4241 evergreen_set_color_surface_buffer(rctx, resource,
4242 iview->format,
4243 iview->u.buf.offset,
4244 iview->u.buf.size,
4245 &color);
4246 }
4247
4248 switch (image->target) {
4249 case PIPE_BUFFER:
4250 res_type = V_028C70_BUFFER;
4251 break;
4252 case PIPE_TEXTURE_1D:
4253 res_type = V_028C70_TEXTURE1D;
4254 break;
4255 case PIPE_TEXTURE_1D_ARRAY:
4256 res_type = V_028C70_TEXTURE1DARRAY;
4257 break;
4258 case PIPE_TEXTURE_2D:
4259 case PIPE_TEXTURE_RECT:
4260 res_type = V_028C70_TEXTURE2D;
4261 break;
4262 case PIPE_TEXTURE_3D:
4263 res_type = V_028C70_TEXTURE3D;
4264 break;
4265 case PIPE_TEXTURE_2D_ARRAY:
4266 case PIPE_TEXTURE_CUBE:
4267 case PIPE_TEXTURE_CUBE_ARRAY:
4268 res_type = V_028C70_TEXTURE2DARRAY;
4269 break;
4270 default:
4271 assert(0);
4272 res_type = 0;
4273 break;
4274 }
4275
4276 rview->cb_color_base = color.offset;
4277 rview->cb_color_dim = color.dim;
4278 rview->cb_color_info = color.info |
4279 S_028C70_RAT(1) |
4280 S_028C70_RESOURCE_TYPE(res_type);
4281 rview->cb_color_pitch = color.pitch;
4282 rview->cb_color_slice = color.slice;
4283 rview->cb_color_view = color.view;
4284 rview->cb_color_attrib = color.attrib;
4285 rview->cb_color_fmask = color.fmask;
4286 rview->cb_color_fmask_slice = color.fmask_slice;
4287
4288 if (image->target != PIPE_BUFFER) {
4289 memset(&tex_params, 0, sizeof(tex_params));
4290 tex_params.pipe_format = iview->format;
4291 tex_params.force_level = 0;
4292 tex_params.width0 = image->width0;
4293 tex_params.height0 = image->height0;
4294 tex_params.first_level = iview->u.tex.level;
4295 tex_params.last_level = iview->u.tex.level;
4296 tex_params.first_layer = iview->u.tex.first_layer;
4297 tex_params.last_layer = iview->u.tex.last_layer;
4298 tex_params.target = image->target;
4299 tex_params.swizzle[0] = PIPE_SWIZZLE_X;
4300 tex_params.swizzle[1] = PIPE_SWIZZLE_Y;
4301 tex_params.swizzle[2] = PIPE_SWIZZLE_Z;
4302 tex_params.swizzle[3] = PIPE_SWIZZLE_W;
4303 evergreen_fill_tex_resource_words(rctx, &resource->b.b, &tex_params,
4304 &rview->skip_mip_address_reloc,
4305 rview->resource_words);
4306
4307 } else {
4308 memset(&buf_params, 0, sizeof(buf_params));
4309 buf_params.pipe_format = iview->format;
4310 buf_params.size = iview->u.buf.size;
4311 buf_params.offset = iview->u.buf.offset;
4312 buf_params.swizzle[0] = PIPE_SWIZZLE_X;
4313 buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
4314 buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
4315 buf_params.swizzle[3] = PIPE_SWIZZLE_W;
4316 evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
4317 &buf_params,
4318 &rview->skip_mip_address_reloc,
4319 rview->resource_words);
4320 }
4321 istate->enabled_mask |= (1 << i);
4322 }
4323
4324 for (i = start_slot + count, idx = 0;
4325 i < start_slot + count + unbind_num_trailing_slots; i++, idx++) {
4326 rview = &istate->views[i];
4327
4328 pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
4329 istate->enabled_mask &= ~(1 << i);
4330 istate->compressed_colortex_mask &= ~(1 << i);
4331 istate->compressed_depthtex_mask &= ~(1 << i);
4332 }
4333
4334 istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;
4335 istate->dirty_buffer_constants = TRUE;
4336 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
4337 rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
4338 R600_CONTEXT_FLUSH_AND_INV_CB_META;
4339
4340 if (old_mask != istate->enabled_mask)
4341 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
4342
4343 if (rctx->cb_misc_state.image_rat_enabled_mask != istate->enabled_mask) {
4344 rctx->cb_misc_state.image_rat_enabled_mask = istate->enabled_mask;
4345 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
4346 }
4347
4348 if (shader == PIPE_SHADER_FRAGMENT)
4349 r600_mark_atom_dirty(rctx, &istate->atom);
4350 }
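/*
 * Usage sketch (not part of this file): state trackers reach the function
 * above through the pipe_context::set_shader_images hook installed in
 * evergreen_init_state_functions() below. The variable names here are
 * hypothetical; only the hook signature is taken from this file.
 *
 *   struct pipe_image_view view;
 *   memset(&view, 0, sizeof(view));
 *   view.resource = tex;                // some previously created texture
 *   view.format = PIPE_FORMAT_R32_UINT;
 *   view.u.tex.level = 0;
 *   view.u.tex.first_layer = 0;
 *   view.u.tex.last_layer = 0;
 *   // bind one image at slot 0, with no trailing slots to unbind
 *   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &view);
 */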
4351
4352 static void evergreen_get_pipe_constant_buffer(struct r600_context *rctx,
4353 enum pipe_shader_type shader, uint slot,
4354 struct pipe_constant_buffer *cbuf)
4355 {
4356 struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
4357 struct pipe_constant_buffer *cb;
4358 cbuf->user_buffer = NULL;
4359
4360 cb = &state->cb[slot];
4361
4362 cbuf->buffer_size = cb->buffer_size;
4363 pipe_resource_reference(&cbuf->buffer, cb->buffer);
4364 }
4365
4366 static void evergreen_get_shader_buffers(struct r600_context *rctx,
4367 enum pipe_shader_type shader,
4368 uint start_slot, uint count,
4369 struct pipe_shader_buffer *sbuf)
4370 {
4371 assert(shader == PIPE_SHADER_COMPUTE);
4372 int idx, i;
4373 struct r600_image_state *istate = &rctx->compute_buffers;
4374 struct r600_image_view *rview;
4375
4376 for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
4377
4378 rview = &istate->views[i];
4379
4380 pipe_resource_reference(&sbuf[idx].buffer, rview->base.resource);
4381 if (rview->base.resource) {
4382 uint64_t rview_va = ((struct r600_resource *)rview->base.resource)->gpu_address;
4383
4384 uint64_t prog_va = rview->resource_words[0];
4385
4386 prog_va += ((uint64_t)G_030008_BASE_ADDRESS_HI(rview->resource_words[2])) << 32;
4387 prog_va -= rview_va;
4388
4389 sbuf[idx].buffer_offset = prog_va & 0xffffffff;
4390 sbuf[idx].buffer_size = rview->resource_words[1] + 1;
4391 } else {
4392 sbuf[idx].buffer_offset = 0;
4393 sbuf[idx].buffer_size = 0;
4394 }
4395 }
4396 }
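/*
 * Worked example for the reconstruction above, with hypothetical numbers:
 * if the bound buffer's GPU VA is 0x100000 and the view was programmed at
 * VA 0x100040 with resource_words[1] == 0x3f, then
 *   prog_va - rview_va     -> buffer_offset = 0x40 (64 bytes)
 *   resource_words[1] + 1  -> buffer_size   = 64 bytes
 */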
4397
4398 static void evergreen_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
4399 {
4400 struct r600_context *rctx = (struct r600_context *)ctx;
4401 st->saved_compute = rctx->cs_shader_state.shader;
4402
4403 /* save constant buffer 0 */
4404 evergreen_get_pipe_constant_buffer(rctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
4405 /* save ssbos 0-2 */
4406 evergreen_get_shader_buffers(rctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
4407 }
4408
4409
4410 void evergreen_init_state_functions(struct r600_context *rctx)
4411 {
4412 unsigned id = 1;
4413 unsigned i;
4414 /* !!!
4415 * To avoid GPU lockups, registers must be emitted in a specific order
4416 * (no kidding ...). The order below is important and has been
4417 * partially inferred from analyzing the fglrx command stream.
4418 *
4419 * Don't reorder atoms without carefully checking the effect (GPU lockup
4420 * or piglit regression).
4421 * !!!
4422 */
4423 if (rctx->b.chip_class == EVERGREEN) {
4424 r600_init_atom(rctx, &rctx->config_state.atom, id++, evergreen_emit_config_state, 11);
4425 rctx->config_state.dyn_gpr_enabled = true;
4426 }
4427 r600_init_atom(rctx, &rctx->framebuffer.atom, id++, evergreen_emit_framebuffer_state, 0);
4428 r600_init_atom(rctx, &rctx->fragment_images.atom, id++, evergreen_emit_fragment_image_state, 0);
4429 r600_init_atom(rctx, &rctx->compute_images.atom, id++, evergreen_emit_compute_image_state, 0);
4430 r600_init_atom(rctx, &rctx->fragment_buffers.atom, id++, evergreen_emit_fragment_buffer_state, 0);
4431 r600_init_atom(rctx, &rctx->compute_buffers.atom, id++, evergreen_emit_compute_buffer_state, 0);
4432 /* shader const */
4433 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, evergreen_emit_vs_constant_buffers, 0);
4434 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, evergreen_emit_gs_constant_buffers, 0);
4435 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, evergreen_emit_ps_constant_buffers, 0);
4436 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL].atom, id++, evergreen_emit_tcs_constant_buffers, 0);
4437 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL].atom, id++, evergreen_emit_tes_constant_buffers, 0);
4438 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom, id++, evergreen_emit_cs_constant_buffers, 0);
4439 /* shader program */
4440 r600_init_atom(rctx, &rctx->cs_shader_state.atom, id++, evergreen_emit_cs_shader, 0);
4441 /* sampler */
4442 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, evergreen_emit_vs_sampler_states, 0);
4443 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, evergreen_emit_gs_sampler_states, 0);
4444 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].states.atom, id++, evergreen_emit_tcs_sampler_states, 0);
4445 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].states.atom, id++, evergreen_emit_tes_sampler_states, 0);
4446 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, evergreen_emit_ps_sampler_states, 0);
4447 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom, id++, evergreen_emit_cs_sampler_states, 0);
4448 /* resources */
4449 r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, evergreen_fs_emit_vertex_buffers, 0);
4450 r600_init_atom(rctx, &rctx->cs_vertex_buffer_state.atom, id++, evergreen_cs_emit_vertex_buffers, 0);
4451 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, evergreen_emit_vs_sampler_views, 0);
4452 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, evergreen_emit_gs_sampler_views, 0);
4453 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views.atom, id++, evergreen_emit_tcs_sampler_views, 0);
4454 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views.atom, id++, evergreen_emit_tes_sampler_views, 0);
4455 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, evergreen_emit_ps_sampler_views, 0);
4456 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom, id++, evergreen_emit_cs_sampler_views, 0);
4457
4458 r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);
4459
4460 if (rctx->b.chip_class == EVERGREEN) {
4461 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, evergreen_emit_sample_mask, 3);
4462 } else {
4463 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, cayman_emit_sample_mask, 4);
4464 }
4465 rctx->sample_mask.sample_mask = ~0;
4466
4467 r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
4468 r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
4469 r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
4470 r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, evergreen_emit_cb_misc_state, 4);
4471 r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 9);
4472 r600_init_atom(rctx, &rctx->clip_state.atom, id++, evergreen_emit_clip_state, 26);
4473 r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, evergreen_emit_db_misc_state, 10);
4474 r600_init_atom(rctx, &rctx->db_state.atom, id++, evergreen_emit_db_state, 14);
4475 r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
4476 r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, evergreen_emit_polygon_offset, 9);
4477 r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
4478 r600_add_atom(rctx, &rctx->b.scissors.atom, id++);
4479 r600_add_atom(rctx, &rctx->b.viewports.atom, id++);
4480 r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
4481 r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, evergreen_emit_vertex_fetch_shader, 5);
4482 r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
4483 r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
4484 r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
4485 for (i = 0; i < EG_NUM_HW_STAGES; i++)
4486 r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
4487 r600_init_atom(rctx, &rctx->shader_stages.atom, id++, evergreen_emit_shader_stages, 15);
4488 r600_init_atom(rctx, &rctx->gs_rings.atom, id++, evergreen_emit_gs_rings, 26);
4489
4490 rctx->b.b.create_blend_state = evergreen_create_blend_state;
4491 rctx->b.b.create_depth_stencil_alpha_state = evergreen_create_dsa_state;
4492 rctx->b.b.create_rasterizer_state = evergreen_create_rs_state;
4493 rctx->b.b.create_sampler_state = evergreen_create_sampler_state;
4494 rctx->b.b.create_sampler_view = evergreen_create_sampler_view;
4495 rctx->b.b.set_framebuffer_state = evergreen_set_framebuffer_state;
4496 rctx->b.b.set_polygon_stipple = evergreen_set_polygon_stipple;
4497 rctx->b.b.set_min_samples = evergreen_set_min_samples;
4498 rctx->b.b.set_tess_state = evergreen_set_tess_state;
4499 rctx->b.b.set_patch_vertices = evergreen_set_patch_vertices;
4500 rctx->b.b.set_hw_atomic_buffers = evergreen_set_hw_atomic_buffers;
4501 rctx->b.b.set_shader_images = evergreen_set_shader_images;
4502 rctx->b.b.set_shader_buffers = evergreen_set_shader_buffers;
4503 if (rctx->b.chip_class == EVERGREEN)
4504 rctx->b.b.get_sample_position = evergreen_get_sample_position;
4505 else
4506 rctx->b.b.get_sample_position = cayman_get_sample_position;
4507 rctx->b.dma_copy = evergreen_dma_copy;
4508 rctx->b.save_qbo_state = evergreen_save_qbo_state;
4509
4510 evergreen_init_compute_state_functions(rctx);
4511 }
4512
4513 /**
4514 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
4515 *
4516 * The information about LDS and other non-compile-time parameters is then
4517 * written to the const buffer.
4518 *
4519 * const buffer contains -
4520 * uint32_t input_patch_size
4521 * uint32_t input_vertex_size
4522 * uint32_t num_tcs_input_cp
4523 * uint32_t num_tcs_output_cp;
4524 * uint32_t output_patch_size
4525 * uint32_t output_vertex_size
4526 * uint32_t output_patch0_offset
4527 * uint32_t perpatch_output_offset
4528 * and the same constbuf is bound to LS/HS/VS(ES).
4529 */
4530 void evergreen_setup_tess_constants(struct r600_context *rctx, const struct pipe_draw_info *info, unsigned *num_patches)
4531 {
4532 struct pipe_constant_buffer constbuf = {0};
4533 struct r600_pipe_shader_selector *tcs = rctx->tcs_shader ? rctx->tcs_shader : rctx->tes_shader;
4534 struct r600_pipe_shader_selector *ls = rctx->vs_shader;
4535 unsigned num_tcs_input_cp = rctx->patch_vertices;
4536 unsigned num_tcs_outputs;
4537 unsigned num_tcs_output_cp;
4538 unsigned num_tcs_patch_outputs;
4539 unsigned num_tcs_inputs;
4540 unsigned input_vertex_size, output_vertex_size;
4541 unsigned input_patch_size, pervertex_output_patch_size, output_patch_size;
4542 unsigned output_patch0_offset, perpatch_output_offset, lds_size;
4543 uint32_t values[8];
4544 unsigned num_waves;
4545 unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
4546 unsigned wave_divisor = (16 * num_pipes);
4547
4548 *num_patches = 1;
4549
4550 if (!rctx->tes_shader) {
4551 rctx->lds_alloc = 0;
4552 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
4553 R600_LDS_INFO_CONST_BUFFER, false, NULL);
4554 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
4555 R600_LDS_INFO_CONST_BUFFER, false, NULL);
4556 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
4557 R600_LDS_INFO_CONST_BUFFER, false, NULL);
4558 return;
4559 }
4560
4561 if (rctx->lds_alloc != 0 &&
4562 rctx->last_ls == ls &&
4563 rctx->last_num_tcs_input_cp == num_tcs_input_cp &&
4564 rctx->last_tcs == tcs)
4565 return;
4566
4567 num_tcs_inputs = util_last_bit64(ls->lds_outputs_written_mask);
4568
4569 if (rctx->tcs_shader) {
4570 num_tcs_outputs = util_last_bit64(tcs->lds_outputs_written_mask);
4571 num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
4572 num_tcs_patch_outputs = util_last_bit64(tcs->lds_patch_outputs_written_mask);
4573 } else {
4574 num_tcs_outputs = num_tcs_inputs;
4575 num_tcs_output_cp = num_tcs_input_cp;
4576 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
4577 }
4578
4579 /* size in bytes */
4580 input_vertex_size = num_tcs_inputs * 16;
4581 output_vertex_size = num_tcs_outputs * 16;
4582
4583 input_patch_size = num_tcs_input_cp * input_vertex_size;
4584
4585 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
4586 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
4587
4588 output_patch0_offset = rctx->tcs_shader ? input_patch_size * *num_patches : 0;
4589 perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
4590
4591 lds_size = output_patch0_offset + output_patch_size * *num_patches;
4592
4593 values[0] = input_patch_size;
4594 values[1] = input_vertex_size;
4595 values[2] = num_tcs_input_cp;
4596 values[3] = num_tcs_output_cp;
4597
4598 values[4] = output_patch_size;
4599 values[5] = output_vertex_size;
4600 values[6] = output_patch0_offset;
4601 values[7] = perpatch_output_offset;
4602
4603 /* docs say HS_NUM_WAVES = CEIL((LS_HS_CONFIG.NUM_PATCHES *
4604 LS_HS_CONFIG.HS_NUM_OUTPUT_CP) / (NUM_GOOD_PIPES * 16)) */
4605 num_waves = ceilf((float)(*num_patches * num_tcs_output_cp) / (float)wave_divisor);
4606
4607 rctx->lds_alloc = (lds_size | (num_waves << 14));
4608
4609 rctx->last_ls = ls;
4610 rctx->last_tcs = tcs;
4611 rctx->last_num_tcs_input_cp = num_tcs_input_cp;
4612
4613 constbuf.user_buffer = values;
4614 constbuf.buffer_size = 8 * 4;
4615
4616 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
4617 R600_LDS_INFO_CONST_BUFFER, false, &constbuf);
4618 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
4619 R600_LDS_INFO_CONST_BUFFER, false, &constbuf);
4620 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
4621 R600_LDS_INFO_CONST_BUFFER, true, &constbuf);
4622 }
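/*
 * Worked example of the sizing above (hypothetical shader counts): with
 * num_tcs_inputs = 2 and num_tcs_outputs = 2 vec4s, 3 input and 3 output
 * control points, 2 patch outputs and *num_patches = 1:
 *   input_vertex_size           = 2 * 16       = 32
 *   input_patch_size            = 3 * 32       = 96
 *   pervertex_output_patch_size = 3 * 32       = 96
 *   output_patch_size           = 96 + 2 * 16  = 128
 *   output_patch0_offset        = 96 * 1       = 96
 *   perpatch_output_offset      = 96 + 96      = 192
 *   lds_size                    = 96 + 128 * 1 = 224 bytes
 * With 2 quad pipes, wave_divisor = 32 and num_waves = ceil(3/32) = 1,
 * so lds_alloc = 224 | (1 << 14).
 */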
4623
4624 uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx,
4625 const struct pipe_draw_info *info,
4626 unsigned num_patches)
4627 {
4628 unsigned num_output_cp;
4629
4630 if (!rctx->tes_shader)
4631 return 0;
4632
4633 num_output_cp = rctx->tcs_shader ?
4634 rctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
4635 rctx->patch_vertices;
4636
4637 return S_028B58_NUM_PATCHES(num_patches) |
4638 S_028B58_HS_NUM_INPUT_CP(rctx->patch_vertices) |
4639 S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
4640 }
4641
4642 void evergreen_set_ls_hs_config(struct r600_context *rctx,
4643 struct radeon_cmdbuf *cs,
4644 uint32_t ls_hs_config)
4645 {
4646 radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
4647 }
4648
4649 void evergreen_set_lds_alloc(struct r600_context *rctx,
4650 struct radeon_cmdbuf *cs,
4651 uint32_t lds_alloc)
4652 {
4653 radeon_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, lds_alloc);
4654 }
4655
4656 /* On Evergreen, if you are running tessellation you need to disable
4657 dynamic GPRs to work around a hardware bug. */
4658 bool evergreen_adjust_gprs(struct r600_context *rctx)
4659 {
4660 unsigned num_gprs[EG_NUM_HW_STAGES];
4661 unsigned def_gprs[EG_NUM_HW_STAGES];
4662 unsigned cur_gprs[EG_NUM_HW_STAGES];
4663 unsigned new_gprs[EG_NUM_HW_STAGES];
4664 unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
4665 unsigned max_gprs;
4666 unsigned i;
4667 unsigned total_gprs;
4668 unsigned tmp[3];
4669 bool rework = false, set_default = false, set_dirty = false;
4670 max_gprs = 0;
4671 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4672 def_gprs[i] = rctx->default_gprs[i];
4673 max_gprs += def_gprs[i];
4674 }
4675 max_gprs += def_num_clause_temp_gprs * 2;
4676
4677 /* if we have no TESS and dyn gpr is enabled then do nothing. */
4678 if (!rctx->hw_shader_stages[EG_HW_STAGE_HS].shader) {
4679 if (rctx->config_state.dyn_gpr_enabled)
4680 return true;
4681
4682 /* transition back to dyn gpr enabled state */
4683 rctx->config_state.dyn_gpr_enabled = true;
4684 r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
4685 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
4686 return true;
4687 }
4688
4689
4690 /* gather required shader gprs */
4691 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4692 if (rctx->hw_shader_stages[i].shader)
4693 num_gprs[i] = rctx->hw_shader_stages[i].shader->shader.bc.ngpr;
4694 else
4695 num_gprs[i] = 0;
4696 }
4697
4698 cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
4699 cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
4700 cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
4701 cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
4702 cur_gprs[EG_HW_STAGE_LS] = G_008C0C_NUM_LS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
4703 cur_gprs[EG_HW_STAGE_HS] = G_008C0C_NUM_HS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
4704
4705 total_gprs = 0;
4706 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4707 new_gprs[i] = num_gprs[i];
4708 total_gprs += num_gprs[i];
4709 }
4710
4711 if (total_gprs > (max_gprs - (2 * def_num_clause_temp_gprs)))
4712 return false;
4713
4714 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4715 if (new_gprs[i] > cur_gprs[i]) {
4716 rework = true;
4717 break;
4718 }
4719 }
4720
4721 if (rctx->config_state.dyn_gpr_enabled) {
4722 set_dirty = true;
4723 rctx->config_state.dyn_gpr_enabled = false;
4724 }
4725
4726 if (rework) {
4727 set_default = true;
4728 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4729 if (new_gprs[i] > def_gprs[i])
4730 set_default = false;
4731 }
4732
4733 if (set_default) {
4734 for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4735 new_gprs[i] = def_gprs[i];
4736 }
4737 } else {
4738 unsigned ps_value = max_gprs;
4739
4740 ps_value -= (def_num_clause_temp_gprs * 2);
4741 for (i = R600_HW_STAGE_VS; i < EG_NUM_HW_STAGES; i++)
4742 ps_value -= new_gprs[i];
4743
4744 new_gprs[R600_HW_STAGE_PS] = ps_value;
4745 }
4746
4747 tmp[0] = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
4748 S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
4749 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
4750
4751 tmp[1] = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
4752 S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);
4753
4754 tmp[2] = S_008C0C_NUM_HS_GPRS(new_gprs[EG_HW_STAGE_HS]) |
4755 S_008C0C_NUM_LS_GPRS(new_gprs[EG_HW_STAGE_LS]);
4756
4757 if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp[0] ||
4758 rctx->config_state.sq_gpr_resource_mgmt_2 != tmp[1] ||
4759 rctx->config_state.sq_gpr_resource_mgmt_3 != tmp[2]) {
4760 rctx->config_state.sq_gpr_resource_mgmt_1 = tmp[0];
4761 rctx->config_state.sq_gpr_resource_mgmt_2 = tmp[1];
4762 rctx->config_state.sq_gpr_resource_mgmt_3 = tmp[2];
4763 set_dirty = true;
4764 }
4765 }
4766
4767
4768 if (set_dirty) {
4769 r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
4770 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
4771 }
4772 return true;
4773 }
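/*
 * Worked example of the PS-gets-the-rest fallback above (hypothetical
 * limits): with max_gprs = 256 and def_num_clause_temp_gprs = 4, the
 * redistributable pool is 256 - 2*4 = 248. If the new VS/GS/ES/LS/HS
 * needs are 32/0/0/36/28 (sum 96), the PS stage is given the remaining
 * 248 - 96 = 152 GPRs.
 */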
4774
4775 #define AC_ENCODE_TRACE_POINT(id) (0xcafe0000 | ((id) & 0xffff))
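/* For example, AC_ENCODE_TRACE_POINT(3) yields 0xcafe0003; ids wider than
 * 16 bits are truncated, so AC_ENCODE_TRACE_POINT(0x12345) yields
 * 0xcafe2345.
 */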
4776
4777 void eg_trace_emit(struct r600_context *rctx)
4778 {
4779 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4780 unsigned reloc;
4781
4782 if (rctx->b.chip_class < EVERGREEN)
4783 return;
4784
4785 /* This must be done after r600_need_cs_space. */
4786 reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4787 (struct r600_resource*)rctx->trace_buf, RADEON_USAGE_WRITE,
4788 RADEON_PRIO_CP_DMA);
4789
4790 rctx->trace_id++;
4791 radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rctx->trace_buf,
4792 RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
4793 radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
4794 radeon_emit(cs, rctx->trace_buf->gpu_address);
4795 radeon_emit(cs, rctx->trace_buf->gpu_address >> 32 | MEM_WRITE_32_BITS | MEM_WRITE_CONFIRM);
4796 radeon_emit(cs, rctx->trace_id);
4797 radeon_emit(cs, 0);
4798 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4799 radeon_emit(cs, reloc);
4800 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4801 radeon_emit(cs, AC_ENCODE_TRACE_POINT(rctx->trace_id));
4802 }
4803
4804 static void evergreen_emit_set_append_cnt(struct r600_context *rctx,
4805 struct r600_shader_atomic *atomic,
4806 struct r600_resource *resource,
4807 uint32_t pkt_flags)
4808 {
4809 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4810 unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4811 resource,
4812 RADEON_USAGE_READ,
4813 RADEON_PRIO_SHADER_RW_BUFFER);
4814 uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4815 uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
4816
4817 uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4 - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;
4818
4819 radeon_emit(cs, PKT3(PKT3_SET_APPEND_CNT, 2, 0) | pkt_flags);
4820 radeon_emit(cs, (reg_val << 16) | 0x3);
4821 radeon_emit(cs, dst_offset & 0xfffffffc);
4822 radeon_emit(cs, (dst_offset >> 32) & 0xff);
4823 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4824 radeon_emit(cs, reloc);
4825 }
4826
4827 static void evergreen_emit_event_write_eos(struct r600_context *rctx,
4828 struct r600_shader_atomic *atomic,
4829 struct r600_resource *resource,
4830 uint32_t pkt_flags)
4831 {
4832 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4833 uint32_t event = EVENT_TYPE_PS_DONE;
4834 uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
4835 uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4836 resource,
4837 RADEON_USAGE_WRITE,
4838 RADEON_PRIO_SHADER_RW_BUFFER);
4839 uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4840 uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4) >> 2;
4841
4842 if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
4843 event = EVENT_TYPE_CS_DONE;
4844
4845 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
4846 radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
4847 radeon_emit(cs, (dst_offset) & 0xffffffff);
4848 radeon_emit(cs, (0 << 29) | ((dst_offset >> 32) & 0xff));
4849 radeon_emit(cs, reg_val);
4850 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4851 radeon_emit(cs, reloc);
4852 }
4853
4854 static void cayman_emit_event_write_eos(struct r600_context *rctx,
4855 struct r600_shader_atomic *atomic,
4856 struct r600_resource *resource,
4857 uint32_t pkt_flags)
4858 {
4859 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4860 uint32_t event = EVENT_TYPE_PS_DONE;
4861 uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4862 resource,
4863 RADEON_USAGE_WRITE,
4864 RADEON_PRIO_SHADER_RW_BUFFER);
4865 uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4866
4867 if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
4868 event = EVENT_TYPE_CS_DONE;
4869
4870 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
4871 radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
4872 radeon_emit(cs, (dst_offset) & 0xffffffff);
4873 radeon_emit(cs, (1 << 29) | ((dst_offset >> 32) & 0xff));
4874 radeon_emit(cs, (atomic->hw_idx) | (1 << 16));
4875 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4876 radeon_emit(cs, reloc);
4877 }
4878
4879 /* writes count from a buffer into GDS */
4880 static void cayman_write_count_to_gds(struct r600_context *rctx,
4881 struct r600_shader_atomic *atomic,
4882 struct r600_resource *resource,
4883 uint32_t pkt_flags)
4884 {
4885 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4886 unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4887 resource,
4888 RADEON_USAGE_READ,
4889 RADEON_PRIO_SHADER_RW_BUFFER);
4890 uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4891
4892 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0) | pkt_flags);
4893 radeon_emit(cs, dst_offset & 0xffffffff);
4894 radeon_emit(cs, PKT3_CP_DMA_CP_SYNC | PKT3_CP_DMA_DST_SEL(1) | ((dst_offset >> 32) & 0xff)); /* GDS */
4895 radeon_emit(cs, atomic->hw_idx * 4);
4896 radeon_emit(cs, 0);
4897 radeon_emit(cs, PKT3_CP_DMA_CMD_DAS | 4);
4898 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4899 radeon_emit(cs, reloc);
4900 }
4901
4902 void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
4903 struct r600_pipe_shader *cs_shader,
4904 struct r600_shader_atomic *combined_atomics,
4905 uint8_t *atomic_used_mask_p)
4906 {
4907 uint8_t atomic_used_mask = 0;
4908 int i, j, k;
4909 bool is_compute = cs_shader != NULL;
4910
4911 for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
4912 uint8_t num_atomic_stage;
4913 struct r600_pipe_shader *pshader;
4914
4915 if (is_compute)
4916 pshader = cs_shader;
4917 else
4918 pshader = rctx->hw_shader_stages[i].shader;
4919 if (!pshader)
4920 continue;
4921
4922 num_atomic_stage = pshader->shader.nhwatomic_ranges;
4923 if (!num_atomic_stage)
4924 continue;
4925
4926 for (j = 0; j < num_atomic_stage; j++) {
4927 struct r600_shader_atomic *atomic = &pshader->shader.atomics[j];
4928 int natomics = atomic->end - atomic->start + 1;
4929
4930 for (k = 0; k < natomics; k++) {
4931 /* seen this in a previous stage */
4932 if (atomic_used_mask & (1u << (atomic->hw_idx + k)))
4933 continue;
4934
4935 combined_atomics[atomic->hw_idx + k].hw_idx = atomic->hw_idx + k;
4936 combined_atomics[atomic->hw_idx + k].buffer_id = atomic->buffer_id;
4937 combined_atomics[atomic->hw_idx + k].start = atomic->start + k;
4938 combined_atomics[atomic->hw_idx + k].end = combined_atomics[atomic->hw_idx + k].start + 1;
4939 atomic_used_mask |= (1u << (atomic->hw_idx + k));
4940 }
4941 }
4942 }
4943 *atomic_used_mask_p = atomic_used_mask;
4944 }
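/*
 * Worked example of the merging above (hypothetical range): an atomic
 * range with hw_idx = 2, start = 5, end = 7 covers natomics = 3 counters,
 * filling combined_atomics[2..4] with start values 5, 6, 7 and setting
 * bits 2-4, i.e. atomic_used_mask = 0x1c.
 */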
4945
4946 void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
4947 bool is_compute,
4948 struct r600_shader_atomic *combined_atomics,
4949 uint8_t atomic_used_mask)
4950 {
4951 struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
4952 unsigned pkt_flags = 0;
4953 uint32_t mask;
4954
4955 if (is_compute)
4956 pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
4957
4958 mask = atomic_used_mask;
4959 if (!mask)
4960 return;
4961
4962 while (mask) {
4963 unsigned atomic_index = u_bit_scan(&mask);
4964 struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
4965 struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
4966 assert(resource);
4967
4968 if (rctx->b.chip_class == CAYMAN)
4969 cayman_write_count_to_gds(rctx, atomic, resource, pkt_flags);
4970 else
4971 evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
4972 }
4973 }
4974
4975 void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
4976 bool is_compute,
4977 struct r600_shader_atomic *combined_atomics,
4978 uint8_t *atomic_used_mask_p)
4979 {
4980 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
4981 struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
4982 uint32_t pkt_flags = 0;
4983 uint32_t event = EVENT_TYPE_PS_DONE;
4984 uint32_t mask;
4985 uint64_t dst_offset;
4986 unsigned reloc;
4987
4988 if (is_compute)
4989 pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
4990
4991 mask = *atomic_used_mask_p;
4992 if (!mask)
4993 return;
4994
4995 while (mask) {
4996 unsigned atomic_index = u_bit_scan(&mask);
4997 struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
4998 struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
4999 assert(resource);
5000
5001 if (rctx->b.chip_class == CAYMAN)
5002 cayman_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
5003 else
5004 evergreen_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
5005 }
5006
5007 if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
5008 event = EVENT_TYPE_CS_DONE;
5009
5010 ++rctx->append_fence_id;
5011 reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
5012 r600_resource(rctx->append_fence),
5013 RADEON_USAGE_READWRITE,
5014 RADEON_PRIO_SHADER_RW_BUFFER);
5015 dst_offset = r600_resource(rctx->append_fence)->gpu_address;
5016 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
5017 radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
5018 radeon_emit(cs, dst_offset & 0xffffffff);
5019 radeon_emit(cs, (2 << 29) | ((dst_offset >> 32) & 0xff));
5020 radeon_emit(cs, rctx->append_fence_id);
5021 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
5022 radeon_emit(cs, reloc);
5023
5024 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0) | pkt_flags);
5025 radeon_emit(cs, WAIT_REG_MEM_GEQUAL | WAIT_REG_MEM_MEMORY | (1 << 8));
5026 radeon_emit(cs, dst_offset & 0xffffffff);
5027 radeon_emit(cs, ((dst_offset >> 32) & 0xff));
5028 radeon_emit(cs, rctx->append_fence_id);
5029 radeon_emit(cs, 0xffffffff);
5030 radeon_emit(cs, 0xa);
5031 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
5032 radeon_emit(cs, reloc);
5033 }
5034