/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "util/format/u_format_s3tc.h"
#include "util/u_draw.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_ureg.h"

#include "nir.h"
#include "nir/nir_to_tgsi_info.h"
#include "tgsi/tgsi_from_mesa.h"

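/* Pre-baked command buffers: small dword arrays that CSO states encode once
 * at create time and that are copied verbatim into the command stream at
 * emit time.  num_dw counts 32-bit dwords (hence the 4 * num_dw allocation).
 */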
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	assert(!cb->buf);
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

void r600_add_atom(struct r600_context *rctx,
		   struct r600_atom *atom,
		   unsigned id)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
	atom->id = id;
}

void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
	r600_add_atom(rctx, atom, id);
}

void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(&rctx->b.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}

void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	radeon_set_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	radeon_set_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}

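/* Translate gallium barrier flags into r600 cache/flush flags.  Constant
 * buffer writes only need the constant cache invalidated; vertex- and
 * texture-visible writes invalidate the vertex and texture caches; and
 * framebuffer/image barriers additionally require a full flush-and-invalidate.
 */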
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!(flags & ~PIPE_BARRIER_UPDATE))
		return;

	if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
		rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

	if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
		     PIPE_BARRIER_SHADER_BUFFER |
		     PIPE_BARRIER_TEXTURE |
		     PIPE_BARRIER_IMAGE |
		     PIPE_BARRIER_STREAMOUT_BUFFER |
		     PIPE_BARRIER_GLOBAL_BUFFER)) {
		rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE |
				 R600_CONTEXT_INV_TEX_CACHE;
	}

	if (flags & (PIPE_BARRIER_FRAMEBUFFER |
		     PIPE_BARRIER_IMAGE))
		rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV;

	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
}

static void r600_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
	rctx->framebuffer.do_update_surf_dirtiness = true;
}

static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(prim < ARRAY_SIZE(prim_conv));
	return prim_conv[prim];
}

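/* The GS output path only distinguishes point/linestrip/tristrip classes,
 * so every pipe primitive type collapses onto one of the three OUTPRIM
 * types (patches are emitted as points).
 */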
unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
					   struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	if (rctx->framebuffer.dual_src_blend != blend->dual_src_blend) {
		rctx->framebuffer.dual_src_blend = blend->dual_src_blend;
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}

static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(rctx, &rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}

static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->blend_color.atom);
}

void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}

void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	radeon_set_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
	if (a->last_draw_was_indirect) {
		a->last_draw_was_indirect = false;
		radeon_set_ctl_const(cs, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
	}
}

static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->clip_state.state = *state;
	r600_mark_atom_dirty(rctx, &rctx->clip_state.atom);
	rctx->driver_consts[PIPE_SHADER_VERTEX].vs_ucp_dirty = true;
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = state;
	r600_mark_atom_dirty(rctx, &rctx->stencil_ref.atom);
}

void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;

	radeon_set_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}

static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = state;

	if (!dsa)
		return;

	ref.ref_value[0] = state.ref_value[0];
	ref.ref_value[1] = state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, ref);
}

static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(rctx, &rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* Work around an Evergreen lockup seen when the
			 * zbuffer is not written: do not enable HyperZ
			 * unless we are writing to the zbuffer.
			 */
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}

	r600_set_stencil_ref(ctx, ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}

static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(rctx, &rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale ||
	     rs->offset_units_unscaled != rctx->poly_offset_state.offset_units_unscaled)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.offset_units_unscaled = rs->offset_units_unscaled;
		r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
	}

	r600_viewport_set_rast_deps(&rctx->b, rs->scissor_enable, rs->clip_halfz);

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}

static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}

static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		list_delinit(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}

void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

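/* Sampler-state binding keeps three bitmasks per stage: enabled_mask (slots
 * with a bound state), dirty_mask (slots whose registers must be re-emitted)
 * and has_bordercolor_mask.  Border colors make a state cost 11 dwords to
 * emit instead of 5 and require idling the pipeline first, which is why
 * r600_sampler_states_dirty() above sizes the atom from these masks.
 */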
static void r600_bind_sampler_states(struct pipe_context *pipe,
				     enum pipe_shader_type shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets a 1 bit for every slot with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	if (!states) {
		disable_mask = ~0u;
		count = 0;
	}

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* A change in TA_CNTL_AUX needs a pipeline flush. */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_mark_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
}

static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}

static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state*)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}

static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	free(dsa);
}

static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}

static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	if (shader)
		r600_resource_reference(&shader->buffer, NULL);
	FREE(shader);
}

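/* Each dirty vertex buffer takes 12 dwords to re-emit on evergreen and 11
 * on r600/700, so the atom size below is recomputed from the dirty mask.
 */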
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
					util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}

static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    unsigned unbind_num_trailing_slots,
				    bool take_ownership,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if ((input[i].buffer.resource != vb[i].buffer.resource) ||
			    (vb[i].stride != input[i].stride) ||
			    (vb[i].buffer_offset != input[i].buffer_offset) ||
			    (vb[i].is_user_buffer != input[i].is_user_buffer)) {
				if (input[i].buffer.resource) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					if (take_ownership) {
						pipe_resource_reference(&vb[i].buffer.resource, NULL);
						vb[i].buffer.resource = input[i].buffer.resource;
					} else {
						pipe_resource_reference(&vb[i].buffer.resource,
									input[i].buffer.resource);
					}
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer.resource);
				} else {
					pipe_resource_reference(&vb[i].buffer.resource, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer.resource, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	for (i = 0; i < unbind_num_trailing_slots; i++) {
		pipe_resource_reference(&vb[count + i].buffer.resource, NULL);
	}
	disable_mask |= ((1ull << unbind_num_trailing_slots) - 1) << count;

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}

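/* Sampler views use the same dirty-mask scheme as vertex buffers above:
 * 14 dwords per dirty view on evergreen, 13 on r600/700.
 */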
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_set_sampler_views(struct pipe_context *pipe,
				   enum pipe_shader_type shader,
				   unsigned start, unsigned count,
				   unsigned unbind_num_trailing_slots,
				   bool take_ownership,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets a 1 bit for every view with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	if (!views) {
		disable_mask = ~0u;
		count = 0;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			if (take_ownership) {
				struct pipe_sampler_view *view = views[i];
				pipe_sampler_view_reference(&view, NULL);
			}
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;
			bool is_buffer = rviews[i]->base.texture->target == PIPE_BUFFER;

			if (!is_buffer && rtex->db_compatible) {
				dst->views.compressed_depthtex_mask |= 1 << i;
			} else {
				dst->views.compressed_depthtex_mask &= ~(1 << i);
			}

			/* Track compressed colorbuffers. */
			if (!is_buffer && rtex->cmask.size) {
				dst->views.compressed_colortex_mask |= 1 << i;
			} else {
				dst->views.compressed_colortex_mask &= ~(1 << i);
			}

			/* Changing from array to non-array textures and vice versa
			 * requires updating TEX_ARRAY_OVERRIDE in sampler states on
			 * R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			if (take_ownership) {
				pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
				dst->views.views[i] = (struct r600_pipe_sampler_view*)views[i];
			} else {
				pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			}
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}

static void r600_update_compressed_colortex_mask(struct r600_samplerview_state *views)
{
	uint32_t mask = views->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = views->views[i]->base.texture;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				views->compressed_colortex_mask |= 1 << i;
			} else {
				views->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}

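/* HW atomic counters are laid out per-stage in the fixed order PS, VS, GS,
 * TES, TCS, so a stage's first counter index is the sum of the counter
 * counts of the stages placed before it.
 */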
static int r600_get_hw_atomic_count(const struct pipe_context *ctx,
				    enum pipe_shader_type shader)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	int value = 0;
	switch (shader) {
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_COMPUTE:
	default:
		break;
	case PIPE_SHADER_VERTEX:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_GEOMETRY:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	case PIPE_SHADER_TESS_EVAL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0);
		break;
	case PIPE_SHADER_TESS_CTRL:
		value = rctx->ps_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			rctx->vs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] +
			(rctx->gs_shader ? rctx->gs_shader->info.file_count[TGSI_FILE_HW_ATOMIC] : 0) +
			rctx->tes_shader->info.file_count[TGSI_FILE_HW_ATOMIC];
		break;
	}
	return value;
}

static void r600_update_compressed_colortex_mask_images(struct r600_image_state *images)
{
	uint32_t mask = images->enabled_mask;

	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct pipe_resource *res = images->views[i].base.resource;

		if (res && res->target != PIPE_BUFFER) {
			struct r600_texture *rtex = (struct r600_texture *)res;

			if (rtex->cmask.size) {
				images->compressed_colortex_mask |= 1 << i;
			} else {
				images->compressed_colortex_mask &= ~(1 << i);
			}
		}
	}
}

/* Compute the key for the hw shader variant. */
static inline void r600_shader_selector_key(const struct pipe_context *ctx,
					    const struct r600_pipe_shader_selector *sel,
					    union r600_shader_key *key)
{
	const struct r600_context *rctx = (struct r600_context *)ctx;
	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX: {
		key->vs.as_ls = (rctx->tes_shader != NULL);
		if (!key->vs.as_ls)
			key->vs.as_es = (rctx->gs_shader != NULL);

		if (rctx->ps_shader->current->shader.gs_prim_id_input && !rctx->gs_shader) {
			key->vs.as_gs_a = true;
			key->vs.prim_id_out = rctx->ps_shader->current->shader.input[rctx->ps_shader->current->shader.ps_prim_id_input].spi_sid;
		}
		key->vs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_VERTEX);
		break;
	}
	case PIPE_SHADER_GEOMETRY:
		key->gs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_GEOMETRY);
		key->gs.tri_strip_adj_fix = rctx->gs_tri_strip_adj_fix;
		break;
	case PIPE_SHADER_FRAGMENT: {
		if (rctx->ps_shader->info.images_declared)
			key->ps.image_size_const_offset = util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask);
		key->ps.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_FRAGMENT);
		key->ps.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key->ps.alpha_to_one = rctx->alpha_to_one &&
				       rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				       !rctx->framebuffer.cb0_is_integer;
		key->ps.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		key->ps.apply_sample_id_mask = (rctx->ps_iter_samples > 1) || !rctx->rasterizer->multisample_enable;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key->ps.nr_cbufs == 1 && rctx->dual_src_blend) {
			key->ps.nr_cbufs = 2;
			key->ps.dual_source_blend = 1;
		}
		break;
	}
	case PIPE_SHADER_TESS_EVAL:
		key->tes.as_es = (rctx->gs_shader != NULL);
		key->tes.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_EVAL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode = rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		key->tcs.first_atomic_counter = r600_get_hw_atomic_count(ctx, PIPE_SHADER_TESS_CTRL);
		break;
	case PIPE_SHADER_COMPUTE:
		break;
	default:
		assert(0);
	}
}

static void
r600_shader_precompile_key(const struct pipe_context *ctx,
			   const struct r600_pipe_shader_selector *sel,
			   union r600_shader_key *key)
{
	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_EVAL:
		/* Assume no tess or GS for setting .as_es. In order to
		 * precompile with es, we'd need the other shaders we're linked
		 * with (see the link_shader screen method).
		 */
		break;

	case PIPE_SHADER_GEOMETRY:
		break;

	case PIPE_SHADER_FRAGMENT:
		key->ps.image_size_const_offset = sel->info.file_max[TGSI_FILE_IMAGE];

		/* This is used for gl_FragColor output expansion to the number
		 * of color buffers bound, but also with sb it'll drop outputs
		 * to unused cbufs.
		 */
		key->ps.nr_cbufs = sel->info.file_max[TGSI_FILE_OUTPUT] + 1;
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* Prim mode comes from the TES, but we need some valid value. */
		key->tcs.prim_mode = PIPE_PRIM_TRIANGLES;
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	default:
		unreachable("bad shader stage");
		break;
	}
}

/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to true if the current variant was changed. */
int r600_shader_select(struct pipe_context *ctx,
		       struct r600_pipe_shader_selector* sel,
		       bool *dirty, bool precompile)
{
	union r600_shader_key key;
	struct r600_pipe_shader *shader = NULL;
	int r;

	if (precompile)
		r600_shader_precompile_key(ctx, sel, &key);
	else
		r600_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants; it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* Look up whether we have other variants in the list. */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}
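
	/* If a matching variant was found it has been unlinked above; it is
	 * re-linked at the head (sel->current) below, keeping the list in
	 * roughly most-recently-used order. */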
	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}

struct r600_pipe_shader_selector *r600_create_shader_state_tokens(struct pipe_context *ctx,
								  const void *prog, enum pipe_shader_ir ir,
								  unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);

	sel->type = pipe_shader_type;
	if (ir == PIPE_SHADER_IR_TGSI) {
		sel->tokens = tgsi_dup_tokens((const struct tgsi_token *)prog);
		tgsi_scan_shader(sel->tokens, &sel->info);
	} else if (ir == PIPE_SHADER_IR_NIR) {
		sel->nir = nir_shader_clone(NULL, (const nir_shader *)prog);
		nir_tgsi_scan_shader(sel->nir, &sel->info, true);
	}
	return sel;
}

static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	int i;
	struct r600_pipe_shader_selector *sel;

	if (state->type == PIPE_SHADER_IR_TGSI)
		sel = r600_create_shader_state_tokens(ctx, state->tokens, state->type, pipe_shader_type);
	else if (state->type == PIPE_SHADER_IR_NIR) {
		sel = r600_create_shader_state_tokens(ctx, state->ir.nir, state->type, pipe_shader_type);
	} else
		assert(0 && "Unknown shader type\n");

	sel->ir_type = state->type;
	sel->so = state->stream_output;

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		break;
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		sel->lds_patch_outputs_written_mask = 0;
		sel->lds_outputs_written_mask = 0;

		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->lds_patch_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
				break;
			default:
				sel->lds_outputs_written_mask |=
					1ull << r600_get_lds_unique_index(name, index);
			}
		}
		break;
	default:
		break;
	}

	/* Precompile the shader with the expected shader key, to reduce jank at
	 * draw time. Also produces output for shader-db.
	 */
	bool dirty;
	r600_shader_select(ctx, sel, &dirty, true);

	return sel;
}

static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *r600_create_tcs_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *r600_create_tes_state(struct pipe_context *ctx,
				   const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}

static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}

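/* Return the shader info of the last pre-rasterizer stage that is bound:
 * GS if present, else TES, else VS.
 */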
static struct tgsi_shader_info *r600_get_vs_info(struct r600_context *rctx)
{
	if (rctx->gs_shader)
		return &rctx->gs_shader->info;
	else if (rctx->tes_shader)
		return &rctx->tes_shader->info;
	else if (rctx->vs_shader)
		return &rctx->vs_shader->info;
	else
		return NULL;
}

static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state || rctx->vs_shader == state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (rctx->vs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}

static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->gs_shader)
		return;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->gs_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}

static void r600_bind_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->tcs_shader = (struct r600_pipe_shader_selector *)state;
}

static void r600_bind_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == rctx->tes_shader)
		return;

	rctx->tes_shader = (struct r600_pipe_shader_selector *)state;
	r600_update_vs_writes_viewport_index(&rctx->b, r600_get_vs_info(rctx));

	if (!state)
		return;

	if (rctx->tes_shader->so.num_outputs)
		rctx->b.streamout.stride_in_dw = rctx->tes_shader->so.stride;
}

void r600_delete_shader_selector(struct pipe_context *ctx,
				 struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;
	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	if (sel->ir_type == PIPE_SHADER_IR_TGSI) {
		free(sel->tokens);
		/* We might have converted the TGSI shader to a NIR shader */
		if (sel->nir)
			ralloc_free(sel->nir);
	}
	else if (sel->ir_type == PIPE_SHADER_IR_NIR)
		ralloc_free(sel->nir);
	free(sel);
}


static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}


static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tcs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tcs_shader == sel) {
		rctx->tcs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_tes_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->tes_shader == sel) {
		rctx->tes_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		r600_mark_atom_dirty(rctx, &state->atom);
	}
}

static void r600_set_constant_buffer(struct pipe_context *ctx,
				     enum pipe_shader_type shader, uint index,
				     bool take_ownership,
				     const struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the gallium frontend can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(ctx->stream_uploader, 0, size, 256,
				      tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(ctx->stream_uploader, 0,
				      input->buffer_size, 256, ptr,
				      &cb->buffer_offset, &cb->buffer);
		}
		/* Account for it in gtt. */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		if (take_ownership) {
			pipe_resource_reference(&cb->buffer, NULL);
			cb->buffer = input->buffer;
		} else {
			pipe_resource_reference(&cb->buffer, input->buffer);
		}
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context*)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_mark_atom_dirty(rctx, &rctx->sample_mask.atom);
}

void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_only)
{
	int sh, size;
	void *ptr;
	struct pipe_constant_buffer cb;
	int start, end;

	start = compute_only ? PIPE_SHADER_COMPUTE : 0;
	end = compute_only ? PIPE_SHADER_TYPES : PIPE_SHADER_COMPUTE;

	for (sh = start; sh < end; sh++) {
		struct r600_shader_driver_constants_info *info = &rctx->driver_consts[sh];
		if (!info->vs_ucp_dirty &&
		    !info->texture_const_dirty &&
		    !info->ps_sample_pos_dirty &&
		    !info->tcs_default_levels_dirty &&
		    !info->cs_block_grid_size_dirty)
			continue;

		ptr = info->constants;
		size = info->alloc_size;
		if (info->vs_ucp_dirty) {
			assert(sh == PIPE_SHADER_VERTEX);
			if (!size) {
				ptr = rctx->clip_state.state.ucp;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			}
			info->vs_ucp_dirty = false;
		}

		else if (info->ps_sample_pos_dirty) {
			assert(sh == PIPE_SHADER_FRAGMENT);
			if (!size) {
				ptr = rctx->sample_positions;
				size = R600_UCP_SIZE;
			} else {
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			}
			info->ps_sample_pos_dirty = false;
		}

		else if (info->cs_block_grid_size_dirty) {
			assert(sh == PIPE_SHADER_COMPUTE);
			if (!size) {
				ptr = rctx->cs_block_grid_sizes;
				size = R600_CS_BLOCK_GRID_SIZE;
			} else {
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			}
			info->cs_block_grid_size_dirty = false;
		}

		else if (info->tcs_default_levels_dirty) {
			/*
			 * We'd only really need this for the default tcs shader.
			 */
			assert(sh == PIPE_SHADER_TESS_CTRL);
			if (!size) {
				ptr = rctx->tess_state;
				size = R600_TCS_DEFAULT_LEVELS_SIZE;
			} else {
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
			}
			info->tcs_default_levels_dirty = false;
		}

		if (info->texture_const_dirty) {
			assert(ptr);
			assert(size);
			if (sh == PIPE_SHADER_VERTEX)
				memcpy(ptr, rctx->clip_state.state.ucp, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_FRAGMENT)
				memcpy(ptr, rctx->sample_positions, R600_UCP_SIZE);
			if (sh == PIPE_SHADER_COMPUTE)
				memcpy(ptr, rctx->cs_block_grid_sizes, R600_CS_BLOCK_GRID_SIZE);
			if (sh == PIPE_SHADER_TESS_CTRL)
				memcpy(ptr, rctx->tess_state, R600_TCS_DEFAULT_LEVELS_SIZE);
		}
		info->texture_const_dirty = false;

		cb.buffer = NULL;
		cb.user_buffer = ptr;
		cb.buffer_offset = 0;
		cb.buffer_size = size;
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, &cb);
		pipe_resource_reference(&cb.buffer, NULL);
	}
}

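/* Grow the per-stage driver-constant area so that the texture/buffer
 * constants live right after the R600_UCP_SIZE bytes reserved for the clip
 * planes/sample positions; *base_offset tells the caller where its array
 * starts.
 */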
static void *r600_alloc_buf_consts(struct r600_context *rctx, int shader_type,
				   unsigned array_size, uint32_t *base_offset)
{
	struct r600_shader_driver_constants_info *info = &rctx->driver_consts[shader_type];
	if (array_size + R600_UCP_SIZE > info->alloc_size) {
		info->constants = realloc(info->constants, array_size + R600_UCP_SIZE);
		info->alloc_size = array_size + R600_UCP_SIZE;
	}
	memset(info->constants + (R600_UCP_SIZE / 4), 0, array_size);
	info->texture_const_dirty = true;
	*base_offset = R600_UCP_SIZE;
	return info->constants;
}
/*
 * On r600/700 hw we don't have vertex fetch swizzle, and although a TBO
 * doesn't require full swizzles, it does need masking and setting alpha
 * to one. So we set up a set of 5 constants with the masks + the alpha
 * value; in the shader we then AND the 4 components with 0xffffffff or 0
 * and OR the alpha with the value given here.
 * We use a 6th constant to store the txq buffer size, and a 7th slot for
 * the number of cube layers in a cube map array.
 */
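/* Per-view layout written below (8 dwords each):
 *   [0..3] per-channel AND mask (0xffffffff or 0)
 *   [4]    alpha OR value (1 or fui(1.0); 0 when all 4 channels exist)
 *   [5]    buffer size in elements, for TXQ
 *   [6]    layer count for cube map arrays (array_size / 6)
 *   [7]    unused (zeroed)
 */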
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	int i, j;
	uint32_t *constants;
	uint32_t base_offset;
	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size, &base_offset);

	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = (base_offset / 4) + i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					constants[offset+j] = 0xffffffff;
				else
					constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					constants[offset+4] = 1;
				else
					constants[offset+4] = fui(1.0);
			} else
				constants[offset + 4] = 0;

			constants[offset + 5] = samplers->views.views[i]->base.u.buf.size /
						util_format_get_blocksize(samplers->views.views[i]->base.format);
			constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}

}

/* On evergreen we store only one value:
 * 1. the number of cube layers in a cube map array.
 */
void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	struct r600_image_state *images = NULL;
	int bits, sview_bits, img_bits;
	uint32_t array_size;
	int i;
	uint32_t *constants;
	uint32_t base_offset;

	if (shader_type == PIPE_SHADER_FRAGMENT) {
		images = &rctx->fragment_images;
	} else if (shader_type == PIPE_SHADER_COMPUTE) {
		images = &rctx->compute_images;
	}

	if (!samplers->views.dirty_buffer_constants &&
	    !(images && images->dirty_buffer_constants))
		return;

	if (images)
		images->dirty_buffer_constants = FALSE;
	samplers->views.dirty_buffer_constants = FALSE;

	bits = sview_bits = util_last_bit(samplers->views.enabled_mask);
	if (images)
		bits += util_last_bit(images->enabled_mask);
	img_bits = bits;

	array_size = bits * sizeof(uint32_t);

	constants = r600_alloc_buf_consts(rctx, shader_type, array_size,
					  &base_offset);

	for (i = 0; i < sview_bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = (base_offset / 4) + i;
			constants[offset] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}
	if (images) {
		for (i = sview_bits; i < img_bits; i++) {
			int idx = i - sview_bits;
			if (images->enabled_mask & (1 << idx)) {
				uint32_t offset = (base_offset / 4) + i;
				constants[offset] = images->views[idx].base.resource->array_size / 6;
			}
		}
	}
}
1544
1545 /* set sample xy locations as array of fragment shader constants */
1546 void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
1547 {
1548 struct pipe_context *ctx = &rctx->b.b;
1549
1550 assert(rctx->framebuffer.nr_samples < R600_UCP_SIZE);
1551 assert(rctx->framebuffer.nr_samples <= ARRAY_SIZE(rctx->sample_positions)/4);
1552
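/* Layout: 4 floats per sample. Floats 0-1 are the x/y position
 * reported by get_sample_position() (in [0,1]), floats 2-3 the same
 * position rebased around the pixel center, as used for
 * interpolateAtSample. */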
1553 memset(rctx->sample_positions, 0, 4 * 4 * 16);
1554 for (unsigned i = 0; i < rctx->framebuffer.nr_samples; i++) {
1555 ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &rctx->sample_positions[4*i]);
1556 /* Also fill in center-zeroed positions used for interpolateAtSample */
1557 rctx->sample_positions[4*i + 2] = rctx->sample_positions[4*i + 0] - 0.5f;
1558 rctx->sample_positions[4*i + 3] = rctx->sample_positions[4*i + 1] - 0.5f;
1559 }
1560
1561 rctx->driver_consts[PIPE_SHADER_FRAGMENT].ps_sample_pos_dirty = true;
1562 }
1563
1564 static void update_shader_atom(struct pipe_context *ctx,
1565 struct r600_shader_state *state,
1566 struct r600_pipe_shader *shader)
1567 {
1568 struct r600_context *rctx = (struct r600_context *)ctx;
1569
1570 state->shader = shader;
1571 if (shader) {
1572 state->atom.num_dw = shader->command_buffer.num_dw;
1573 r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
1574 } else {
1575 state->atom.num_dw = 0;
1576 }
1577 r600_mark_atom_dirty(rctx, &state->atom);
1578 }
1579
1580 static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
1581 {
1582 if (rctx->shader_stages.geom_enable != enable) {
1583 rctx->shader_stages.geom_enable = enable;
1584 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
1585 }
1586
1587 if (rctx->gs_rings.enable != enable) {
1588 rctx->gs_rings.enable = enable;
1589 r600_mark_atom_dirty(rctx, &rctx->gs_rings.atom);
1590
1591 if (enable && !rctx->gs_rings.esgs_ring.buffer) {
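/* Ring sizes: 0x1C000 bytes (112 KiB) for the ES->GS ring and
 * 0x4000000 bytes (64 MiB) for the GS->VS ring below; both are
 * allocated once on first use and then kept for the lifetime of
 * the context. */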
1592 unsigned size = 0x1C000;
1593 rctx->gs_rings.esgs_ring.buffer =
1594 pipe_buffer_create(rctx->b.b.screen, 0,
1595 PIPE_USAGE_DEFAULT, size);
1596 rctx->gs_rings.esgs_ring.buffer_size = size;
1597
1598 size = 0x4000000;
1599
1600 rctx->gs_rings.gsvs_ring.buffer =
1601 pipe_buffer_create(rctx->b.b.screen, 0,
1602 PIPE_USAGE_DEFAULT, size);
1603 rctx->gs_rings.gsvs_ring.buffer_size = size;
1604 }
1605
1606 if (enable) {
1607 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
1608 R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.esgs_ring);
1609 if (rctx->tes_shader) {
1610 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
1611 R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
1612 } else {
1613 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
1614 R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
1615 }
1616 } else {
1617 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
1618 R600_GS_RING_CONST_BUFFER, false, NULL);
1619 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
1620 R600_GS_RING_CONST_BUFFER, false, NULL);
1621 r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
1622 R600_GS_RING_CONST_BUFFER, false, NULL);
1623 }
1624 }
1625 }
1626
1627 static void r600_update_clip_state(struct r600_context *rctx,
1628 struct r600_pipe_shader *current)
1629 {
1630 if (current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
1631 current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
1632 current->shader.cull_dist_write != rctx->clip_misc_state.cull_dist_write ||
1633 current->shader.vs_position_window_space != rctx->clip_misc_state.clip_disable ||
1634 current->shader.vs_out_viewport != rctx->clip_misc_state.vs_out_viewport) {
1635 rctx->clip_misc_state.pa_cl_vs_out_cntl = current->pa_cl_vs_out_cntl;
1636 rctx->clip_misc_state.clip_dist_write = current->shader.clip_dist_write;
1637 rctx->clip_misc_state.cull_dist_write = current->shader.cull_dist_write;
1638 rctx->clip_misc_state.clip_disable = current->shader.vs_position_window_space;
1639 rctx->clip_misc_state.vs_out_viewport = current->shader.vs_out_viewport;
1640 r600_mark_atom_dirty(rctx, &rctx->clip_misc_state.atom);
1641 }
1642 }
1643
1644 static void r600_generate_fixed_func_tcs(struct r600_context *rctx)
1645 {
1646 struct ureg_src const0, const1;
1647 struct ureg_dst tessouter, tessinner;
1648 struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
1649
1650 if (!ureg)
1651 return; /* if we get here, we're screwed */
1652
1653 assert(!rctx->fixed_func_tcs_shader);
1654
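/* Build a pass-through TCS: it copies two vec4s from the
 * R600_BUFFER_INFO constant buffer straight into the TESSOUTER and
 * TESSINNER outputs, standing in for an application-provided hull
 * shader when only a TES is bound. */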
1655 ureg_DECL_constant2D(ureg, 0, 1, R600_BUFFER_INFO_CONST_BUFFER);
1656 const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
1657 R600_BUFFER_INFO_CONST_BUFFER);
1658 const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
1659 R600_BUFFER_INFO_CONST_BUFFER);
1660
1661 tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
1662 tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
1663
1664 ureg_MOV(ureg, tessouter, const0);
1665 ureg_MOV(ureg, tessinner, const1);
1666 ureg_END(ureg);
1667
1668 rctx->fixed_func_tcs_shader =
1669 ureg_create_shader_and_destroy(ureg, &rctx->b.b);
1670 }
1671
1672 void r600_update_compressed_resource_state(struct r600_context *rctx, bool compute_only)
1673 {
1674 unsigned i;
1675 unsigned counter;
1676
1677 counter = p_atomic_read(&rctx->screen->b.compressed_colortex_counter);
1678 if (counter != rctx->b.last_compressed_colortex_counter) {
1679 rctx->b.last_compressed_colortex_counter = counter;
1680
1681 if (compute_only) {
1682 r600_update_compressed_colortex_mask(&rctx->samplers[PIPE_SHADER_COMPUTE].views);
1683 } else {
1684 for (i = 0; i < PIPE_SHADER_TYPES; ++i) {
1685 r600_update_compressed_colortex_mask(&rctx->samplers[i].views);
1686 }
1687 }
1688 if (!compute_only)
1689 r600_update_compressed_colortex_mask_images(&rctx->fragment_images);
1690 r600_update_compressed_colortex_mask_images(&rctx->compute_images);
1691 }
1692
1693 /* Decompress textures if needed. */
1694 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
1695 struct r600_samplerview_state *views = &rctx->samplers[i].views;
1696
1697 if (compute_only)
1698 if (i != PIPE_SHADER_COMPUTE)
1699 continue;
1700 if (views->compressed_depthtex_mask) {
1701 r600_decompress_depth_textures(rctx, views);
1702 }
1703 if (views->compressed_colortex_mask) {
1704 r600_decompress_color_textures(rctx, views);
1705 }
1706 }
1707
1708 {
1709 struct r600_image_state *istate;
1710
1711 if (!compute_only) {
1712 istate = &rctx->fragment_images;
1713 if (istate->compressed_depthtex_mask)
1714 r600_decompress_depth_images(rctx, istate);
1715 if (istate->compressed_colortex_mask)
1716 r600_decompress_color_images(rctx, istate);
1717 }
1718
1719 istate = &rctx->compute_images;
1720 if (istate->compressed_depthtex_mask)
1721 r600_decompress_depth_images(rctx, istate);
1722 if (istate->compressed_colortex_mask)
1723 r600_decompress_color_images(rctx, istate);
1724 }
1725 }
1726
1727 /* update MEM_SCRATCH buffers if needed */
1728 void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
1729 struct r600_pipe_shader *shader, struct r600_scratch_buffer *scratch,
1730 unsigned ring_base_reg, unsigned item_size_reg, unsigned ring_size_reg)
1731 {
1732 unsigned num_ses = rctx->screen->b.info.max_se;
1733 unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
1734 unsigned nthreads = 128;
1735
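/* scratch_space_needed is in dwords, so itemsize is in bytes. The
 * total is scaled by waves of 128 threads across all quad pipes and
 * shader engines (the extra x4 presumably covers multiple waves in
 * flight per pipe) and rounded up to the 256-byte granularity that
 * the ring base/size registers below expect (values shifted by 8). */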
1736 unsigned itemsize = shader->scratch_space_needed * 4;
1737 unsigned size = align(itemsize * nthreads * num_pipes * num_ses * 4, 256);
1738
1739 if (scratch->dirty ||
1740 unlikely(shader->scratch_space_needed != scratch->item_size ||
1741 size > scratch->size)) {
1742 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
1743
1744 scratch->dirty = false;
1745
1746 if (size > scratch->size) {
1747 // Release the previous buffer, if any
1748 if (scratch->buffer) {
1749 pipe_resource_reference((struct pipe_resource**)&scratch->buffer, NULL);
1750 }
1751
1752 scratch->buffer = (struct r600_resource *)pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
1753 PIPE_USAGE_DEFAULT, size);
1754 if (scratch->buffer) {
1755 scratch->size = size;
1756 }
1757 }
1758
1759 scratch->item_size = shader->scratch_space_needed;
1760
1761 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
1762 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1763 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
1764
1765 // multi-SE chips need programming per SE
1766 for (unsigned se = 0; se < num_ses; se++) {
1767 struct r600_resource *rbuffer = scratch->buffer;
1768 unsigned size_per_se = size / num_ses;
1769
1770 // Direct to particular SE
1771 if (num_ses > 1) {
1772 radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
1773 S_0802C_INSTANCE_INDEX(0) |
1774 S_0802C_SE_INDEX(se) |
1775 S_0802C_INSTANCE_BROADCAST_WRITES(1) |
1776 S_0802C_SE_BROADCAST_WRITES(0));
1777 }
1778
1779 radeon_set_config_reg(cs, ring_base_reg, (rbuffer->gpu_address + size_per_se * se) >> 8);
1780 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1781 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1782 RADEON_USAGE_READWRITE |
1783 RADEON_PRIO_SCRATCH_BUFFER));
1784 radeon_set_context_reg(cs, item_size_reg, itemsize);
1785 radeon_set_config_reg(cs, ring_size_reg, size_per_se >> 8);
1786 }
1787
1788 // Restore broadcast mode
1789 if (num_ses > 1) {
1790 radeon_set_config_reg(cs, EG_0802C_GRBM_GFX_INDEX,
1791 S_0802C_INSTANCE_INDEX(0) |
1792 S_0802C_SE_INDEX(0) |
1793 S_0802C_INSTANCE_BROADCAST_WRITES(1) |
1794 S_0802C_SE_BROADCAST_WRITES(1));
1795 }
1796
1797 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
1798 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1799 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
1800 }
1801 }
1802
1803 void r600_setup_scratch_buffers(struct r600_context *rctx) {
1804 static const struct {
1805 unsigned ring_base;
1806 unsigned item_size;
1807 unsigned ring_size;
1808 } regs[R600_NUM_HW_STAGES] = {
1809 [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
1810 [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
1811 [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
1812 [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE }
1813 };
1814
1815 for (unsigned i = 0; i < R600_NUM_HW_STAGES; i++) {
1816 struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
1817
1818 if (stage && unlikely(stage->scratch_space_needed)) {
1819 r600_setup_scratch_area_for_shader(rctx, stage,
1820 &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
1821 }
1822 }
1823 }
1824
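/* Helpers for r600_update_derived_state() below: re-select the shader
 * variant for a sw stage and, when it (or its gs_copy_shader) changed,
 * rebind the matching hw stage atom. The *_CLIP/_GS variants also
 * record which shader ends up driving clip/streamout state. */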
1825 #define SELECT_SHADER_OR_FAIL(x) do { \
1826 r600_shader_select(ctx, rctx->x##_shader, &x##_dirty, false); \
1827 if (unlikely(!rctx->x##_shader->current)) \
1828 return false; \
1829 } while(0)
1830
1831 #define UPDATE_SHADER(hw, sw) do { \
1832 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) \
1833 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
1834 } while(0)
1835
1836 #define UPDATE_SHADER_CLIP(hw, sw) do { \
1837 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
1838 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
1839 clip_so_current = rctx->sw##_shader->current; \
1840 } \
1841 } while(0)
1842
1843 #define UPDATE_SHADER_GS(hw, hw2, sw) do { \
1844 if (sw##_dirty || (rctx->hw_shader_stages[(hw)].shader != rctx->sw##_shader->current)) { \
1845 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], rctx->sw##_shader->current); \
1846 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw2)], rctx->sw##_shader->current->gs_copy_shader); \
1847 clip_so_current = rctx->sw##_shader->current->gs_copy_shader; \
1848 } \
1849 } while(0)
1850
1851 #define SET_NULL_SHADER(hw) do { \
1852 if (rctx->hw_shader_stages[(hw)].shader) \
1853 update_shader_atom(ctx, &rctx->hw_shader_stages[(hw)], NULL); \
1854 } while (0)
1855
1856 static bool r600_update_derived_state(struct r600_context *rctx)
1857 {
1858 struct pipe_context * ctx = (struct pipe_context*)rctx;
1859 bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
1860 bool tcs_dirty = false, tes_dirty = false, fixed_func_tcs_dirty = false;
1861 bool blend_disable;
1862 bool need_buf_const;
1863 struct r600_pipe_shader *clip_so_current = NULL;
1864
1865 if (!rctx->blitter->running)
1866 r600_update_compressed_resource_state(rctx, false);
1867
1868 SELECT_SHADER_OR_FAIL(ps);
1869
1870 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
1871
1872 update_gs_block_state(rctx, rctx->gs_shader != NULL);
1873
1874 if (rctx->gs_shader)
1875 SELECT_SHADER_OR_FAIL(gs);
1876
1877 /* Hull Shader */
1878 if (rctx->tcs_shader) {
1879 SELECT_SHADER_OR_FAIL(tcs);
1880
1881 UPDATE_SHADER(EG_HW_STAGE_HS, tcs);
1882 } else if (rctx->tes_shader) {
1883 if (!rctx->fixed_func_tcs_shader) {
1884 r600_generate_fixed_func_tcs(rctx);
1885 if (!rctx->fixed_func_tcs_shader)
1886 return false;
1887
1888 }
1889 SELECT_SHADER_OR_FAIL(fixed_func_tcs);
1890
1891 UPDATE_SHADER(EG_HW_STAGE_HS, fixed_func_tcs);
1892 } else
1893 SET_NULL_SHADER(EG_HW_STAGE_HS);
1894
1895 if (rctx->tes_shader) {
1896 SELECT_SHADER_OR_FAIL(tes);
1897 }
1898
1899 SELECT_SHADER_OR_FAIL(vs);
1900
1901 if (rctx->gs_shader) {
1902 if (!rctx->shader_stages.geom_enable) {
1903 rctx->shader_stages.geom_enable = true;
1904 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
1905 }
1906
1907 /* gs_shader provides GS and VS (copy shader) */
1908 UPDATE_SHADER_GS(R600_HW_STAGE_GS, R600_HW_STAGE_VS, gs);
1909
1910 /* either vs_shader or tes_shader is used as ES */
1911
1912 if (rctx->tes_shader) {
1913 /* VS goes to LS, TES goes to ES */
1914 UPDATE_SHADER(R600_HW_STAGE_ES, tes);
1915 UPDATE_SHADER(EG_HW_STAGE_LS, vs);
1916 } else {
1917 /* vs_shader is used as ES */
1918 UPDATE_SHADER(R600_HW_STAGE_ES, vs);
1919 SET_NULL_SHADER(EG_HW_STAGE_LS);
1920 }
1921 } else {
1922 if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
1923 SET_NULL_SHADER(R600_HW_STAGE_GS);
1924 SET_NULL_SHADER(R600_HW_STAGE_ES);
1925 rctx->shader_stages.geom_enable = false;
1926 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
1927 }
1928
1929 if (rctx->tes_shader) {
1930 /* if TES is loaded and no geometry, TES runs on hw VS, VS runs on hw LS */
1931 UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, tes);
1932 UPDATE_SHADER(EG_HW_STAGE_LS, vs);
1933 } else {
1934 SET_NULL_SHADER(EG_HW_STAGE_LS);
1935 UPDATE_SHADER_CLIP(R600_HW_STAGE_VS, vs);
1936 }
1937 }
1938
1939 /*
1940 * XXX: I believe there's some fatal flaw in the dirty state logic when
1941 * enabling/disabling tes.
1942 * VS/ES share all buffer/resource/sampler slots. If TES is enabled,
1943 * it will therefore overwrite the VS slots. If it now gets disabled,
1944 * the VS needs to rebind all buffer/resource/sampler slots - not only
1945 * has TES overwritten the corresponding slots, but when the VS was
1946 * operating as LS the things with corresponding dirty bits got bound
1947 * to LS slots and won't reflect what is dirty as VS stage even if the
1948 * TES didn't overwrite it. The story for re-enabled TES is similar.
1949 * In any case, we're not allowed to submit any TES state when
1950 * TES is disabled (the gallium frontend may not do this but this looks
1951 * like an optimization to me, not something which can be relied on).
1952 */
1953
1954 /* Update clip misc state. */
1955 if (clip_so_current) {
1956 r600_update_clip_state(rctx, clip_so_current);
1957 rctx->b.streamout.enabled_stream_buffers_mask = clip_so_current->enabled_stream_buffers_mask;
1958 }
1959
1960 if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
1961 rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
1962 rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {
1963
1964 bool msaa = rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0;
1965 if (unlikely(rctx->ps_shader &&
1966 ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
1967 (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade) ||
1968 (msaa != rctx->ps_shader->current->msaa)))) {
1969
1970 if (rctx->b.chip_class >= EVERGREEN)
1971 evergreen_update_ps_state(ctx, rctx->ps_shader->current);
1972 else
1973 r600_update_ps_state(ctx, rctx->ps_shader->current);
1974 }
1975
1976 if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs ||
1977 rctx->cb_misc_state.ps_color_export_mask != rctx->ps_shader->current->ps_color_export_mask) {
1978 rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
1979 rctx->cb_misc_state.ps_color_export_mask = rctx->ps_shader->current->ps_color_export_mask;
1980 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
1981 }
1982
1983 if (rctx->b.chip_class <= R700) {
1984 bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;
1985
1986 if (rctx->cb_misc_state.multiwrite != multiwrite) {
1987 rctx->cb_misc_state.multiwrite = multiwrite;
1988 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
1989 }
1990 }
1991
1992 r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
1993 }
1994 UPDATE_SHADER(R600_HW_STAGE_PS, ps);
1995
1996 if (rctx->b.chip_class >= EVERGREEN) {
1997 evergreen_update_db_shader_control(rctx);
1998 } else {
1999 r600_update_db_shader_control(rctx);
2000 }
2001
2002 /* For each shader stage that needs to spill, set up buffer for MEM_SCRATCH */
2003 if (rctx->b.chip_class >= EVERGREEN) {
2004 evergreen_setup_scratch_buffers(rctx);
2005 } else {
2006 r600_setup_scratch_buffers(rctx);
2007 }
2008
2009 /* on R600 we stuff masks + txq info into one constant buffer */
2010 /* on evergreen we only need a txq info one */
2011 if (rctx->ps_shader) {
2012 need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
2013 if (need_buf_const) {
2014 if (rctx->b.chip_class < EVERGREEN)
2015 r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
2016 else
2017 eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
2018 }
2019 }
2020
2021 if (rctx->vs_shader) {
2022 need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
2023 if (need_buf_const) {
2024 if (rctx->b.chip_class < EVERGREEN)
2025 r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
2026 else
2027 eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
2028 }
2029 }
2030
2031 if (rctx->gs_shader) {
2032 need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
2033 if (need_buf_const) {
2034 if (rctx->b.chip_class < EVERGREEN)
2035 r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
2036 else
2037 eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
2038 }
2039 }
2040
2041 if (rctx->tes_shader) {
2042 assert(rctx->b.chip_class >= EVERGREEN);
2043 need_buf_const = rctx->tes_shader->current->shader.uses_tex_buffers ||
2044 rctx->tes_shader->current->shader.has_txq_cube_array_z_comp;
2045 if (need_buf_const) {
2046 eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_EVAL);
2047 }
2048 if (rctx->tcs_shader) {
2049 need_buf_const = rctx->tcs_shader->current->shader.uses_tex_buffers ||
2050 rctx->tcs_shader->current->shader.has_txq_cube_array_z_comp;
2051 if (need_buf_const) {
2052 eg_setup_buffer_constants(rctx, PIPE_SHADER_TESS_CTRL);
2053 }
2054 }
2055 }
2056
2057 r600_update_driver_const_buffers(rctx, false);
2058
2059 if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
2060 if (!r600_adjust_gprs(rctx)) {
2061 /* discard rendering */
2062 return false;
2063 }
2064 }
2065
2066 if (rctx->b.chip_class == EVERGREEN) {
2067 if (!evergreen_adjust_gprs(rctx)) {
2068 /* discard rendering */
2069 return false;
2070 }
2071 }
2072
2073 blend_disable = (rctx->dual_src_blend &&
2074 rctx->ps_shader->current->nr_ps_color_outputs < 2);
2075
2076 if (blend_disable != rctx->force_blend_disable) {
2077 rctx->force_blend_disable = blend_disable;
2078 r600_bind_blend_state_internal(rctx,
2079 rctx->blend_state.cso,
2080 blend_disable);
2081 }
2082
2083 return true;
2084 }
2085
2086 void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
2087 {
2088 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2089 struct r600_clip_misc_state *state = &rctx->clip_misc_state;
2090
2091 radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
2092 state->pa_cl_clip_cntl |
2093 (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
2094 S_028810_CLIP_DISABLE(state->clip_disable));
2095 radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
2096 state->pa_cl_vs_out_cntl |
2097 (state->clip_plane_enable & state->clip_dist_write) |
2098 (state->cull_dist_write << 8));
2099 /* reuse must be turned off if we write oViewport */
2100 if (rctx->b.chip_class >= EVERGREEN)
2101 radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
2102 S_028AB4_REUSE_OFF(state->vs_out_viewport));
2103 }
2104
2105 /* rast_prim is the primitive type after GS. */
2106 static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
2107 {
2108 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2109 enum pipe_prim_type rast_prim = rctx->current_rast_prim;
2110
2111 /* Skip this if not rendering lines. */
2112 if (rast_prim != PIPE_PRIM_LINES &&
2113 rast_prim != PIPE_PRIM_LINE_LOOP &&
2114 rast_prim != PIPE_PRIM_LINE_STRIP &&
2115 rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
2116 rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
2117 return;
2118
2119 if (rast_prim == rctx->last_rast_prim)
2120 return;
2121
2122 /* For lines, reset the stipple pattern at each primitive. Otherwise,
2123 * reset the stipple pattern at each packet (line strips, line loops).
2124 */
2125 radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
2126 S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) |
2127 (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
2128 rctx->last_rast_prim = rast_prim;
2129 }
2130
2131 static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
2132 unsigned drawid_offset,
2133 const struct pipe_draw_indirect_info *indirect,
2134 const struct pipe_draw_start_count_bias *draws,
2135 unsigned num_draws)
2136 {
2137 if (num_draws > 1) {
2138 util_draw_multi(ctx, info, drawid_offset, indirect, draws, num_draws);
2139 return;
2140 }
2141
2142 struct r600_context *rctx = (struct r600_context *)ctx;
2143 struct pipe_resource *indexbuf = !info->index_size || info->has_user_indices ? NULL : info->index.resource;
2144 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2145 bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
2146 bool has_user_indices = info->index_size && info->has_user_indices;
2147 uint64_t mask;
2148 unsigned num_patches, dirty_tex_counter, index_offset = 0;
2149 unsigned index_size = info->index_size;
2150 int index_bias;
2151 struct r600_shader_atomic combined_atomics[8];
2152 uint8_t atomic_used_mask = 0;
2153 struct pipe_stream_output_target *count_from_so = NULL;
2154
2155 if (indirect && indirect->count_from_stream_output) {
2156 count_from_so = indirect->count_from_stream_output;
2157 indirect = NULL;
2158 }
2159
2160 if (!indirect && !draws[0].count && (index_size || !count_from_so)) {
2161 return;
2162 }
2163
2164 if (unlikely(!rctx->vs_shader)) {
2165 assert(0);
2166 return;
2167 }
2168 if (unlikely(!rctx->ps_shader &&
2169 (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
2170 assert(0);
2171 return;
2172 }
2173
2174 /* flush any pending DMA work so the gfx ring is the only active one */
2175 if (radeon_emitted(&rctx->b.dma.cs, 0)) {
2176 rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
2177 }
2178
2179 if (rctx->cmd_buf_is_compute) {
2180 rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
2181 rctx->cmd_buf_is_compute = false;
2182 }
2183
2184 /* Re-emit the framebuffer state if needed. */
2185 dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter);
2186 if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
2187 rctx->b.last_dirty_tex_counter = dirty_tex_counter;
2188 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
2189 rctx->framebuffer.do_update_surf_dirtiness = true;
2190 }
2191
2192 if (rctx->gs_shader) {
2193 /* Determine whether the GS triangle strip adjacency fix should
2194 * be applied. Rotate every other triangle if
2195 * - triangle strips with adjacency are fed to the GS and
2196 * - primitive restart is disabled (the rotation doesn't help
2197 * when the restart occurs after an odd number of triangles).
2198 */
2199 bool gs_tri_strip_adj_fix =
2200 !rctx->tes_shader &&
2201 info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
2202 !info->primitive_restart;
2203 if (gs_tri_strip_adj_fix != rctx->gs_tri_strip_adj_fix)
2204 rctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
2205 }
2206 if (!r600_update_derived_state(rctx)) {
2207 /* there is no point in rendering: the current draw
2208 * state could not be set up correctly
2209 */
2210 return;
2211 }
2212
2213 rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim
2214 : (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
2215 : info->mode;
2216
2217 if (rctx->b.chip_class >= EVERGREEN) {
2218 evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask);
2219 }
2220
2221 if (index_size) {
2222 index_offset += draws[0].start * index_size;
2223
2224 /* Translate 8-bit indices to 16-bit. */
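/* The hw only understands 16- and 32-bit index types (see the
 * PKT3_INDEX_TYPE emit further down), so 8-bit index buffers are
 * widened on the CPU through a staging upload. */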
2225 if (unlikely(index_size == 1)) {
2226 struct pipe_resource *out_buffer = NULL;
2227 unsigned out_offset;
2228 void *ptr;
2229 unsigned start, count;
2230
2231 if (likely(!indirect)) {
2232 start = 0;
2233 count = draws[0].count;
2234 }
2235 else {
2236 /* Have to get start/count from indirect buffer, slow path ahead... */
2237 struct r600_resource *indirect_resource = (struct r600_resource *)indirect->buffer;
2238 unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
2239 PIPE_MAP_READ);
2240 if (data) {
2241 data += indirect->offset / sizeof(unsigned);
2242 start = data[2] * index_size;
2243 count = data[0];
2244 }
2245 else {
2246 start = 0;
2247 count = 0;
2248 }
2249 }
2250
2251 u_upload_alloc(ctx->stream_uploader, start, count * 2,
2252 256, &out_offset, &out_buffer, &ptr);
2253 if (unlikely(!ptr))
2254 return;
2255
2256 util_shorten_ubyte_elts_to_userptr(
2257 &rctx->b.b, info, 0, 0, index_offset, count, ptr);
2258
2259 indexbuf = out_buffer;
2260 index_offset = out_offset;
2261 index_size = 2;
2262 has_user_indices = false;
2263 }
2264
2265 /* Upload the index buffer.
2266 * The upload is skipped for small index counts on little-endian machines
2267 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
2268 * Indirect draws never use immediate indices.
2269 * Note: Instanced rendering in combination with immediate indices hangs. */
2270 if (has_user_indices && (R600_BIG_ENDIAN || indirect ||
2271 info->instance_count > 1 ||
2272 draws[0].count*index_size > 20)) {
2273 unsigned start_offset = draws[0].start * index_size;
2274 indexbuf = NULL;
2275 u_upload_data(ctx->stream_uploader, start_offset,
2276 draws[0].count * index_size, 256,
2277 (char*)info->index.user + start_offset,
2278 &index_offset, &indexbuf);
2279 index_offset -= start_offset;
2280 has_user_indices = false;
2281 }
2282 index_bias = draws->index_bias;
2283 } else {
2284 index_bias = indirect ? 0 : draws[0].start;
2285 }
2286
2287 /* Set the index offset and primitive restart. */
2288 bool restart_index_changed = info->primitive_restart &&
2289 rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index;
2290
2291 if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
2292 restart_index_changed ||
2293 rctx->vgt_state.vgt_indx_offset != index_bias ||
2294 (rctx->vgt_state.last_draw_was_indirect && !indirect)) {
2295 rctx->vgt_state.vgt_multi_prim_ib_reset_en = info->primitive_restart;
2296 rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info->restart_index;
2297 rctx->vgt_state.vgt_indx_offset = index_bias;
2298 r600_mark_atom_dirty(rctx, &rctx->vgt_state.atom);
2299 }
2300
2301 /* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
2302 if (rctx->b.chip_class == R600) {
2303 rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
2304 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
2305 }
2306
2307 if (rctx->b.chip_class >= EVERGREEN)
2308 evergreen_setup_tess_constants(rctx, info, &num_patches);
2309
2310 /* Emit states. */
2311 r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask));
2312 r600_flush_emit(rctx);
2313
2314 mask = rctx->dirty_atoms;
2315 while (mask != 0) {
2316 r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
2317 }
2318
2319 if (rctx->b.chip_class >= EVERGREEN) {
2320 evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask);
2321 }
2322
2323 if (rctx->b.chip_class == CAYMAN) {
2324 /* Copied from radeonsi. */
2325 unsigned primgroup_size = 128; /* recommended without a GS */
2326 bool ia_switch_on_eop = false;
2327 bool partial_vs_wave = false;
2328
2329 if (rctx->gs_shader)
2330 primgroup_size = 64; /* recommended with a GS */
2331
2332 if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
2333 (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
2334 ia_switch_on_eop = true;
2335 }
2336
2337 if (r600_get_strmout_en(&rctx->b))
2338 partial_vs_wave = true;
2339
2340 radeon_set_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
2341 S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
2342 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
2343 S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
2344 }
2345
2346 if (rctx->b.chip_class >= EVERGREEN) {
2347 uint32_t ls_hs_config = evergreen_get_ls_hs_config(rctx, info,
2348 num_patches);
2349
2350 evergreen_set_ls_hs_config(rctx, cs, ls_hs_config);
2351 evergreen_set_lds_alloc(rctx, cs, rctx->lds_alloc);
2352 }
2353
2354 /* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
2355 * even though it should have no effect on those. */
2356 if (rctx->b.chip_class == R600 && rctx->rasterizer) {
2357 unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
2358 unsigned prim = info->mode;
2359
2360 if (rctx->gs_shader) {
2361 prim = rctx->gs_shader->gs_output_prim;
2362 }
2363 prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */
2364
2365 if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
2366 prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
2367 info->mode == R600_PRIM_RECTANGLE_LIST) {
2368 su_sc_mode_cntl &= C_028814_CULL_FRONT;
2369 }
2370 radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
2371 }
2372
2373 /* Update start instance. */
2374 if (!indirect && rctx->last_start_instance != info->start_instance) {
2375 radeon_set_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
2376 rctx->last_start_instance = info->start_instance;
2377 }
2378
2379 /* Update the primitive type. */
2380 if (rctx->last_primitive_type != info->mode) {
2381 r600_emit_rasterizer_prim_state(rctx);
2382 radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
2383 r600_conv_pipe_prim(info->mode));
2384
2385 rctx->last_primitive_type = info->mode;
2386 }
2387
2388 /* Draw packets. */
2389 if (likely(!indirect)) {
2390 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
2391 radeon_emit(cs, info->instance_count);
2392 } else {
2393 uint64_t va = r600_resource(indirect->buffer)->gpu_address;
2394 assert(rctx->b.chip_class >= EVERGREEN);
2395
2396 // Invalidate so non-indirect draw calls reset this state
2397 rctx->vgt_state.last_draw_was_indirect = true;
2398 rctx->last_start_instance = -1;
2399
2400 radeon_emit(cs, PKT3(EG_PKT3_SET_BASE, 2, 0));
2401 radeon_emit(cs, EG_DRAW_INDEX_INDIRECT_PATCH_TABLE_BASE);
2402 radeon_emit(cs, va);
2403 radeon_emit(cs, (va >> 32UL) & 0xFF);
2404
2405 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2406 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
2407 (struct r600_resource*)indirect->buffer,
2408 RADEON_USAGE_READ |
2409 RADEON_PRIO_DRAW_INDIRECT));
2410 }
2411
2412 if (index_size) {
2413 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
2414 radeon_emit(cs, index_size == 4 ?
2415 (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
2416 (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
2417
2418 if (has_user_indices) {
2419 unsigned size_bytes = draws[0].count*index_size;
2420 unsigned size_dw = align(size_bytes, 4) / 4;
2421 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
2422 radeon_emit(cs, draws[0].count);
2423 radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
2424 radeon_emit_array(cs, info->index.user + draws[0].start * index_size, size_dw);
2425 } else {
2426 uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
2427
2428 if (likely(!indirect)) {
2429 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
2430 radeon_emit(cs, va);
2431 radeon_emit(cs, (va >> 32UL) & 0xFF);
2432 radeon_emit(cs, draws[0].count);
2433 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
2434 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2435 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
2436 (struct r600_resource*)indexbuf,
2437 RADEON_USAGE_READ |
2438 RADEON_PRIO_INDEX_BUFFER));
2439 }
2440 else {
2441 uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
2442
2443 radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
2444 radeon_emit(cs, va);
2445 radeon_emit(cs, (va >> 32UL) & 0xFF);
2446
2447 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2448 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
2449 (struct r600_resource*)indexbuf,
2450 RADEON_USAGE_READ |
2451 RADEON_PRIO_INDEX_BUFFER));
2452
2453 radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
2454 radeon_emit(cs, max_size);
2455
2456 radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
2457 radeon_emit(cs, indirect->offset);
2458 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
2459 }
2460 }
2461 } else {
2462 if (unlikely(count_from_so)) {
2463 struct r600_so_target *t = (struct r600_so_target*)count_from_so;
2464 uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;
2465
2466 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
2467
2468 radeon_emit(cs, PKT3(PKT3_COPY_DW, 4, 0));
2469 radeon_emit(cs, COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG);
2470 radeon_emit(cs, va & 0xFFFFFFFFUL); /* src address lo */
2471 radeon_emit(cs, (va >> 32UL) & 0xFFUL); /* src address hi */
2472 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2); /* dst register */
2473 radeon_emit(cs, 0); /* unused */
2474
2475 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2476 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
2477 t->buf_filled_size, RADEON_USAGE_READ |
2478 RADEON_PRIO_SO_FILLED_SIZE));
2479 }
2480
2481 if (likely(!indirect)) {
2482 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
2483 radeon_emit(cs, draws[0].count);
2484 }
2485 else {
2486 radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDIRECT, 1, render_cond_bit));
2487 radeon_emit(cs, indirect->offset);
2488 }
2489 radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2490 (count_from_so ? S_0287F0_USE_OPAQUE(1) : 0));
2491 }
2492
2493 /* SMX returns CONTEXT_DONE too early workaround */
2494 if (rctx->b.family == CHIP_R600 ||
2495 rctx->b.family == CHIP_RV610 ||
2496 rctx->b.family == CHIP_RV630 ||
2497 rctx->b.family == CHIP_RV635) {
2498 /* if we have a GS shader or streamout enabled,
2499 we need a wait-idle after every draw */
2500 if (rctx->gs_shader || r600_get_strmout_en(&rctx->b)) {
2501 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
2502 }
2503 }
2504
2505 /* ES ring rolling over at EOP - workaround */
2506 if (rctx->b.chip_class == R600) {
2507 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2508 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SQ_NON_EVENT));
2509 }
2510
2511
2512 if (rctx->b.chip_class >= EVERGREEN)
2513 evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics, &atomic_used_mask);
2514
2515 if (rctx->trace_buf)
2516 eg_trace_emit(rctx);
2517
2518 if (rctx->framebuffer.do_update_surf_dirtiness) {
2519 /* Set the depth buffer as dirty. */
2520 if (rctx->framebuffer.state.zsbuf) {
2521 struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
2522 struct r600_texture *rtex = (struct r600_texture *)surf->texture;
2523
2524 rtex->dirty_level_mask |= 1 << surf->u.tex.level;
2525
2526 if (rtex->surface.has_stencil)
2527 rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
2528 }
2529 if (rctx->framebuffer.compressed_cb_mask) {
2530 struct pipe_surface *surf;
2531 struct r600_texture *rtex;
2532 unsigned mask = rctx->framebuffer.compressed_cb_mask;
2533
2534 do {
2535 unsigned i = u_bit_scan(&mask);
2536 surf = rctx->framebuffer.state.cbufs[i];
2537 rtex = (struct r600_texture*)surf->texture;
2538
2539 rtex->dirty_level_mask |= 1 << surf->u.tex.level;
2540
2541 } while (mask);
2542 }
2543 rctx->framebuffer.do_update_surf_dirtiness = false;
2544 }
2545
2546 if (index_size && indexbuf != info->index.resource)
2547 pipe_resource_reference(&indexbuf, NULL);
2548 rctx->b.num_draw_calls++;
2549 }
2550
2551 uint32_t r600_translate_stencil_op(int s_op)
2552 {
2553 switch (s_op) {
2554 case PIPE_STENCIL_OP_KEEP:
2555 return V_028800_STENCIL_KEEP;
2556 case PIPE_STENCIL_OP_ZERO:
2557 return V_028800_STENCIL_ZERO;
2558 case PIPE_STENCIL_OP_REPLACE:
2559 return V_028800_STENCIL_REPLACE;
2560 case PIPE_STENCIL_OP_INCR:
2561 return V_028800_STENCIL_INCR;
2562 case PIPE_STENCIL_OP_DECR:
2563 return V_028800_STENCIL_DECR;
2564 case PIPE_STENCIL_OP_INCR_WRAP:
2565 return V_028800_STENCIL_INCR_WRAP;
2566 case PIPE_STENCIL_OP_DECR_WRAP:
2567 return V_028800_STENCIL_DECR_WRAP;
2568 case PIPE_STENCIL_OP_INVERT:
2569 return V_028800_STENCIL_INVERT;
2570 default:
2571 R600_ERR("Unknown stencil op %d", s_op);
2572 assert(0);
2573 break;
2574 }
2575 return 0;
2576 }
2577
2578 uint32_t r600_translate_fill(uint32_t func)
2579 {
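/* These return values match the POLY_MODE front/back PTYPE encodings
 * of PA_SU_SC_MODE_CNTL: 0 = points, 1 = lines, 2 = filled triangles. */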
2580 switch(func) {
2581 case PIPE_POLYGON_MODE_FILL:
2582 return 2;
2583 case PIPE_POLYGON_MODE_LINE:
2584 return 1;
2585 case PIPE_POLYGON_MODE_POINT:
2586 return 0;
2587 default:
2588 assert(0);
2589 return 0;
2590 }
2591 }
2592
2593 unsigned r600_tex_wrap(unsigned wrap)
2594 {
2595 switch (wrap) {
2596 default:
2597 case PIPE_TEX_WRAP_REPEAT:
2598 return V_03C000_SQ_TEX_WRAP;
2599 case PIPE_TEX_WRAP_CLAMP:
2600 return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
2601 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
2602 return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
2603 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
2604 return V_03C000_SQ_TEX_CLAMP_BORDER;
2605 case PIPE_TEX_WRAP_MIRROR_REPEAT:
2606 return V_03C000_SQ_TEX_MIRROR;
2607 case PIPE_TEX_WRAP_MIRROR_CLAMP:
2608 return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
2609 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
2610 return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
2611 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
2612 return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
2613 }
2614 }
2615
2616 unsigned r600_tex_mipfilter(unsigned filter)
2617 {
2618 switch (filter) {
2619 case PIPE_TEX_MIPFILTER_NEAREST:
2620 return V_03C000_SQ_TEX_Z_FILTER_POINT;
2621 case PIPE_TEX_MIPFILTER_LINEAR:
2622 return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
2623 default:
2624 case PIPE_TEX_MIPFILTER_NONE:
2625 return V_03C000_SQ_TEX_Z_FILTER_NONE;
2626 }
2627 }
2628
2629 unsigned r600_tex_compare(unsigned compare)
2630 {
2631 switch (compare) {
2632 default:
2633 case PIPE_FUNC_NEVER:
2634 return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
2635 case PIPE_FUNC_LESS:
2636 return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
2637 case PIPE_FUNC_EQUAL:
2638 return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
2639 case PIPE_FUNC_LEQUAL:
2640 return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
2641 case PIPE_FUNC_GREATER:
2642 return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
2643 case PIPE_FUNC_NOTEQUAL:
2644 return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
2645 case PIPE_FUNC_GEQUAL:
2646 return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
2647 case PIPE_FUNC_ALWAYS:
2648 return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
2649 }
2650 }
2651
2652 static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
2653 {
2654 return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
2655 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
2656 (linear_filter &&
2657 (wrap == PIPE_TEX_WRAP_CLAMP ||
2658 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
2659 }
2660
2661 bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
2662 {
2663 bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
2664 state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;
2665
2666 return (state->border_color.ui[0] || state->border_color.ui[1] ||
2667 state->border_color.ui[2] || state->border_color.ui[3]) &&
2668 (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
2669 wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
2670 wrap_mode_uses_border_color(state->wrap_r, linear_filter));
2671 }
2672
2673 void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
2674 {
2675
2676 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
2677 struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;
2678
2679 if (!shader)
2680 return;
2681
2682 r600_emit_command_buffer(cs, &shader->command_buffer);
2683 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2684 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->bo,
2685 RADEON_USAGE_READ | RADEON_PRIO_SHADER_BINARY));
2686 }
2687
2688 unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
2689 const unsigned char *swizzle_view,
2690 boolean vtx)
2691 {
2692 unsigned i;
2693 unsigned char swizzle[4];
2694 unsigned result = 0;
2695 const uint32_t tex_swizzle_shift[4] = {
2696 16, 19, 22, 25,
2697 };
2698 const uint32_t vtx_swizzle_shift[4] = {
2699 3, 6, 9, 12,
2700 };
2701 const uint32_t swizzle_bit[4] = {
2702 0, 1, 2, 3,
2703 };
2704 const uint32_t *swizzle_shift = tex_swizzle_shift;
2705
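/* Each destination-channel select is a 3-bit field: texture resource
 * words place the X/Y/Z/W selects at bits 16/19/22/25, vertex-fetch
 * words at bits 3/6/9/12 (see the shift tables above). */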
2706 if (vtx)
2707 swizzle_shift = vtx_swizzle_shift;
2708
2709 if (swizzle_view) {
2710 util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
2711 } else {
2712 memcpy(swizzle, swizzle_format, 4);
2713 }
2714
2715 /* Get swizzle. */
2716 for (i = 0; i < 4; i++) {
2717 switch (swizzle[i]) {
2718 case PIPE_SWIZZLE_Y:
2719 result |= swizzle_bit[1] << swizzle_shift[i];
2720 break;
2721 case PIPE_SWIZZLE_Z:
2722 result |= swizzle_bit[2] << swizzle_shift[i];
2723 break;
2724 case PIPE_SWIZZLE_W:
2725 result |= swizzle_bit[3] << swizzle_shift[i];
2726 break;
2727 case PIPE_SWIZZLE_0:
2728 result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
2729 break;
2730 case PIPE_SWIZZLE_1:
2731 result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
2732 break;
2733 default: /* PIPE_SWIZZLE_X */
2734 result |= swizzle_bit[0] << swizzle_shift[i];
2735 }
2736 }
2737 return result;
2738 }
2739
2740 /* texture format translate */
2741 uint32_t r600_translate_texformat(struct pipe_screen *screen,
2742 enum pipe_format format,
2743 const unsigned char *swizzle_view,
2744 uint32_t *word4_p, uint32_t *yuv_format_p,
2745 bool do_endian_swap)
2746 {
2747 struct r600_screen *rscreen = (struct r600_screen *)screen;
2748 uint32_t result = 0, word4 = 0, yuv_format = 0;
2749 const struct util_format_description *desc;
2750 boolean uniform = TRUE;
2751 bool is_srgb_valid = FALSE;
2752 const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
2753 const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
2754 const unsigned char swizzle_xxxy[4] = {0, 0, 0, 1};
2755 const unsigned char swizzle_zyx1[4] = {2, 1, 0, 5};
2756 const unsigned char swizzle_zyxw[4] = {2, 1, 0, 3};
2757
2758 int i;
2759 const uint32_t sign_bit[4] = {
2760 S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
2761 S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
2762 S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
2763 S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
2764 };
2765
2766 /* Need to replace the specified texture formats in case of big-endian.
2767 * These are formats whose channels have a number of bits
2768 * not divisible by 8.
2769 * Mesa conversion functions don't swap bits for those formats, and because
2770 * we transmit the data over a serial bus to the GPU (PCIe), the
2771 * bit-endianness is important.
2772 * In case we have an "opposite" format, just use that for the swizzling
2773 * information. If we don't have such an "opposite" format, we need
2774 * to use fixed swizzle info instead (see below).
2775 */
2776 if (format == PIPE_FORMAT_R4A4_UNORM && do_endian_swap)
2777 format = PIPE_FORMAT_A4R4_UNORM;
2778
2779 desc = util_format_description(format);
2780 if (!desc)
2781 goto out_unknown;
2782
2783 /* Depth and stencil swizzling is handled separately. */
2784 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
2785 /* Need to check for specific texture formats that don't have
2786 * an "opposite" format we can use. For those formats, we directly
2787 * specify the swizzling, which is the LE swizzling as defined in
2788 * u_format.csv
2789 */
2790 if (do_endian_swap) {
2791 if (format == PIPE_FORMAT_L4A4_UNORM)
2792 word4 |= r600_get_swizzle_combined(swizzle_xxxy, swizzle_view, FALSE);
2793 else if (format == PIPE_FORMAT_B4G4R4A4_UNORM)
2794 word4 |= r600_get_swizzle_combined(swizzle_zyxw, swizzle_view, FALSE);
2795 else if (format == PIPE_FORMAT_B4G4R4X4_UNORM || format == PIPE_FORMAT_B5G6R5_UNORM)
2796 word4 |= r600_get_swizzle_combined(swizzle_zyx1, swizzle_view, FALSE);
2797 else
2798 word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
2799 } else {
2800 word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
2801 }
2802 }
2803
2804 /* Colorspace (return non-RGB formats directly). */
2805 switch (desc->colorspace) {
2806 /* Depth stencil formats */
2807 case UTIL_FORMAT_COLORSPACE_ZS:
2808 switch (format) {
2809 /* Depth sampler formats. */
2810 case PIPE_FORMAT_Z16_UNORM:
2811 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2812 result = FMT_16;
2813 goto out_word4;
2814 case PIPE_FORMAT_Z24X8_UNORM:
2815 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
2816 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2817 result = FMT_8_24;
2818 goto out_word4;
2819 case PIPE_FORMAT_X8Z24_UNORM:
2820 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
2821 if (rscreen->b.chip_class < EVERGREEN)
2822 goto out_unknown;
2823 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
2824 result = FMT_24_8;
2825 goto out_word4;
2826 case PIPE_FORMAT_Z32_FLOAT:
2827 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2828 result = FMT_32_FLOAT;
2829 goto out_word4;
2830 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
2831 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2832 result = FMT_X24_8_32_FLOAT;
2833 goto out_word4;
2834 /* Stencil sampler formats. */
2835 case PIPE_FORMAT_S8_UINT:
2836 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
2837 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2838 result = FMT_8;
2839 goto out_word4;
2840 case PIPE_FORMAT_X24S8_UINT:
2841 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
2842 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
2843 result = FMT_8_24;
2844 goto out_word4;
2845 case PIPE_FORMAT_S8X24_UINT:
2846 if (rscreen->b.chip_class < EVERGREEN)
2847 goto out_unknown;
2848 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
2849 word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
2850 result = FMT_24_8;
2851 goto out_word4;
2852 case PIPE_FORMAT_X32_S8X24_UINT:
2853 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
2854 word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
2855 result = FMT_X24_8_32_FLOAT;
2856 goto out_word4;
2857 default:
2858 goto out_unknown;
2859 }
2860
2861 case UTIL_FORMAT_COLORSPACE_YUV:
2862 yuv_format |= (1 << 30);
2863 switch (format) {
2864 case PIPE_FORMAT_UYVY:
2865 case PIPE_FORMAT_YUYV:
2866 default:
2867 break;
2868 }
2869 goto out_unknown; /* XXX */
2870
2871 case UTIL_FORMAT_COLORSPACE_SRGB:
2872 word4 |= S_038010_FORCE_DEGAMMA(1);
2873 break;
2874
2875 default:
2876 break;
2877 }
2878
2879 if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
2880 switch (format) {
2881 case PIPE_FORMAT_RGTC1_SNORM:
2882 case PIPE_FORMAT_LATC1_SNORM:
2883 word4 |= sign_bit[0];
2884 FALLTHROUGH;
2885 case PIPE_FORMAT_RGTC1_UNORM:
2886 case PIPE_FORMAT_LATC1_UNORM:
2887 result = FMT_BC4;
2888 goto out_word4;
2889 case PIPE_FORMAT_RGTC2_SNORM:
2890 case PIPE_FORMAT_LATC2_SNORM:
2891 word4 |= sign_bit[0] | sign_bit[1];
2892 FALLTHROUGH;
2893 case PIPE_FORMAT_RGTC2_UNORM:
2894 case PIPE_FORMAT_LATC2_UNORM:
2895 result = FMT_BC5;
2896 goto out_word4;
2897 default:
2898 goto out_unknown;
2899 }
2900 }
2901
2902 if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
2903 switch (format) {
2904 case PIPE_FORMAT_DXT1_RGB:
2905 case PIPE_FORMAT_DXT1_RGBA:
2906 case PIPE_FORMAT_DXT1_SRGB:
2907 case PIPE_FORMAT_DXT1_SRGBA:
2908 result = FMT_BC1;
2909 is_srgb_valid = TRUE;
2910 goto out_word4;
2911 case PIPE_FORMAT_DXT3_RGBA:
2912 case PIPE_FORMAT_DXT3_SRGBA:
2913 result = FMT_BC2;
2914 is_srgb_valid = TRUE;
2915 goto out_word4;
2916 case PIPE_FORMAT_DXT5_RGBA:
2917 case PIPE_FORMAT_DXT5_SRGBA:
2918 result = FMT_BC3;
2919 is_srgb_valid = TRUE;
2920 goto out_word4;
2921 default:
2922 goto out_unknown;
2923 }
2924 }
2925
2926 if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
2927 if (rscreen->b.chip_class < EVERGREEN)
2928 goto out_unknown;
2929
2930 switch (format) {
2931 case PIPE_FORMAT_BPTC_RGBA_UNORM:
2932 case PIPE_FORMAT_BPTC_SRGBA:
2933 result = FMT_BC7;
2934 is_srgb_valid = TRUE;
2935 goto out_word4;
2936 case PIPE_FORMAT_BPTC_RGB_FLOAT:
2937 word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
2938 FALLTHROUGH;
2939 case PIPE_FORMAT_BPTC_RGB_UFLOAT:
2940 result = FMT_BC6;
2941 goto out_word4;
2942 default:
2943 goto out_unknown;
2944 }
2945 }
2946
2947 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
2948 switch (format) {
2949 case PIPE_FORMAT_R8G8_B8G8_UNORM:
2950 case PIPE_FORMAT_G8R8_B8R8_UNORM:
2951 result = FMT_GB_GR;
2952 goto out_word4;
2953 case PIPE_FORMAT_G8R8_G8B8_UNORM:
2954 case PIPE_FORMAT_R8G8_R8B8_UNORM:
2955 result = FMT_BG_RG;
2956 goto out_word4;
2957 default:
2958 goto out_unknown;
2959 }
2960 }
2961
2962 if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
2963 result = FMT_5_9_9_9_SHAREDEXP;
2964 goto out_word4;
2965 } else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
2966 result = FMT_10_11_11_FLOAT;
2967 goto out_word4;
2968 }
2969
2970
2971 for (i = 0; i < desc->nr_channels; i++) {
2972 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2973 word4 |= sign_bit[i];
2974 }
2975 }
2976
2977 /* R8G8Bx_SNORM - XXX CxV8U8 */
2978
2979 /* See whether the components are of the same size. */
2980 for (i = 1; i < desc->nr_channels; i++) {
2981 uniform = uniform && desc->channel[0].size == desc->channel[i].size;
2982 }
2983
2984 /* Non-uniform formats. */
2985 if (!uniform) {
2986 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
2987 desc->channel[0].pure_integer)
2988 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
2989 switch(desc->nr_channels) {
2990 case 3:
2991 if (desc->channel[0].size == 5 &&
2992 desc->channel[1].size == 6 &&
2993 desc->channel[2].size == 5) {
2994 result = FMT_5_6_5;
2995 goto out_word4;
2996 }
2997 goto out_unknown;
2998 case 4:
2999 if (desc->channel[0].size == 5 &&
3000 desc->channel[1].size == 5 &&
3001 desc->channel[2].size == 5 &&
3002 desc->channel[3].size == 1) {
3003 result = FMT_1_5_5_5;
3004 goto out_word4;
3005 }
3006 if (desc->channel[0].size == 10 &&
3007 desc->channel[1].size == 10 &&
3008 desc->channel[2].size == 10 &&
3009 desc->channel[3].size == 2) {
3010 result = FMT_2_10_10_10;
3011 goto out_word4;
3012 }
3013 goto out_unknown;
3014 }
3015 goto out_unknown;
3016 }
3017
3018 /* Find the first non-VOID channel. */
3019 for (i = 0; i < 4; i++) {
3020 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
3021 break;
3022 }
3023 }
3024
3025 if (i == 4)
3026 goto out_unknown;
3027
3028 /* uniform formats */
3029 switch (desc->channel[i].type) {
3030 case UTIL_FORMAT_TYPE_UNSIGNED:
3031 case UTIL_FORMAT_TYPE_SIGNED:
3032 #if 0
3033 if (!desc->channel[i].normalized &&
3034 desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
3035 goto out_unknown;
3036 }
3037 #endif
3038 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
3039 desc->channel[i].pure_integer)
3040 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
3041
3042 switch (desc->channel[i].size) {
3043 case 4:
3044 switch (desc->nr_channels) {
3045 case 2:
3046 result = FMT_4_4;
3047 goto out_word4;
3048 case 4:
3049 result = FMT_4_4_4_4;
3050 goto out_word4;
3051 }
3052 goto out_unknown;
3053 case 8:
3054 switch (desc->nr_channels) {
3055 case 1:
3056 result = FMT_8;
3057 is_srgb_valid = TRUE;
3058 goto out_word4;
3059 case 2:
3060 result = FMT_8_8;
3061 goto out_word4;
3062 case 4:
3063 result = FMT_8_8_8_8;
3064 is_srgb_valid = TRUE;
3065 goto out_word4;
3066 }
3067 goto out_unknown;
3068 case 16:
3069 switch (desc->nr_channels) {
3070 case 1:
3071 result = FMT_16;
3072 goto out_word4;
3073 case 2:
3074 result = FMT_16_16;
3075 goto out_word4;
3076 case 4:
3077 result = FMT_16_16_16_16;
3078 goto out_word4;
3079 }
3080 goto out_unknown;
3081 case 32:
3082 switch (desc->nr_channels) {
3083 case 1:
3084 result = FMT_32;
3085 goto out_word4;
3086 case 2:
3087 result = FMT_32_32;
3088 goto out_word4;
3089 case 4:
3090 result = FMT_32_32_32_32;
3091 goto out_word4;
3092 }
3093 }
3094 goto out_unknown;
3095
3096 case UTIL_FORMAT_TYPE_FLOAT:
3097 switch (desc->channel[i].size) {
3098 case 16:
3099 switch (desc->nr_channels) {
3100 case 1:
3101 result = FMT_16_FLOAT;
3102 goto out_word4;
3103 case 2:
3104 result = FMT_16_16_FLOAT;
3105 goto out_word4;
3106 case 4:
3107 result = FMT_16_16_16_16_FLOAT;
3108 goto out_word4;
3109 }
3110 goto out_unknown;
3111 case 32:
3112 switch (desc->nr_channels) {
3113 case 1:
3114 result = FMT_32_FLOAT;
3115 goto out_word4;
3116 case 2:
3117 result = FMT_32_32_FLOAT;
3118 goto out_word4;
3119 case 4:
3120 result = FMT_32_32_32_32_FLOAT;
3121 goto out_word4;
3122 }
3123 }
3124 goto out_unknown;
3125 }
3126
3127 out_word4:
3128
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		return ~0;
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}

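/* Translate a gallium format to the FORMAT field of CB_COLOR*_INFO
 * (a V_0280A0_COLOR_* value). Returns ~0U if the format is not
 * renderable. */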
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format,
				    bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

	if (!desc)
		return ~0U;

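/* Compare all four channel sizes at once; channels not present in a
 * format have size 0. */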
#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
	 desc->channel[2].size == (z) && desc->channel[3].size == (w))

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
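			/* Packed 8+24 layouts (stencil+depth style): the
			 * component order flips when an endian swap will be
			 * applied. */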
			return (do_endian_swap ? V_0280A0_COLOR_8_24 : V_0280A0_COLOR_24_8);
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}

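/* Select the CB endian swap mode for a color format. Little-endian
 * builds never swap; big-endian builds swap bytes within 16-bit or
 * 32-bit units, matching the packed width of the format. */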
uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
{
	if (R600_BIG_ENDIAN) {
		switch (colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<->pipe format conversion already takes
			 * endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
			return (do_endian_swap ? ENDIAN_8IN16 : ENDIAN_NONE);

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
			/*
			 * No need to do endian swaps on array formats,
			 * as the mesa<->pipe format conversion already takes
			 * endianness into account.
			 */
			return ENDIAN_NONE;

		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
			return (do_endian_swap ? ENDIAN_8IN32 : ENDIAN_NONE);

		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN16;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
		case V_0280A0_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}

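/* Replace the buffer's backing storage and re-dirty every binding point
 * that referenced the old storage, so the new GPU address gets emitted. */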
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(&rctx->screen->b, rbuffer);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}
	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i] &&
		    rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			uint64_t offset = view->base.u.buf.offset;
			uint64_t va = rbuffer->gpu_address + offset;

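			/* WORD0 takes the low 32 bits of the VA; WORD2 carries
			 * the high bits in BASE_ADDRESS_HI. */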
			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}

	/* SSBOs */
	struct r600_image_state *istate = &rctx->fragment_buffers;
	{
		uint32_t mask = istate->enabled_mask;
		bool found = false;
		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (istate->views[i].base.resource == &rbuffer->b.b) {
				found = true;
				istate->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_mark_atom_dirty(rctx, &istate->atom);
		}
	}
}

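/* Pause or resume queries globally: toggle the pipeline-statistics
 * counters and the occlusion-query enable in the db_misc state. */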
static void r600_set_active_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context*)ctx;

	/* Pipeline statistics & streamout queries. */
	if (enable) {
		rctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
	} else {
		rctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
		rctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
	}

	/* Occlusion queries. */
	if (rctx->db_misc_state.occlusion_queries_disabled != !enable) {
		rctx->db_misc_state.occlusion_queries_disabled = !enable;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

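/* Thin wrapper so common code can reserve space in the gfx command stream. */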
static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0);
}

/* Keep this at the end of this file, please. */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_tcs_state = r600_create_tcs_state;
	rctx->b.b.create_tes_state = r600_create_tes_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.bind_tcs_state = r600_bind_tcs_state;
	rctx->b.b.bind_tes_state = r600_bind_tes_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
	rctx->b.b.delete_tes_state = r600_delete_tes_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.memory_barrier = r600_memory_barrier;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.set_active_query_state = r600_set_active_query_state;

	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}
