/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_string.h"

#include "freedreno_resource.h"
#include "freedreno_state.h"

#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_format.h"
#include "fd6_program.h"
#include "fd6_vsc.h"
#include "fd6_zsa.h"

#include "fd6_pack.h"

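/* Emit a transform-feedback sourced draw (e.g. glDrawTransformFeedback()),
 * where the vertex count comes from a streamout byte counter on the GPU
 * rather than from the CPU:
 */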
static void
draw_emit_xfb(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
              const struct pipe_draw_info *info,
              const struct pipe_draw_indirect_info *indirect)
{
   struct fd_stream_output_target *target =
      fd_stream_output_target(indirect->count_from_stream_output);
   struct fd_resource *offset = fd_resource(target->offset_buf);

   /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
    * Plus, for the common case where the counter buffer is written by
    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
    * complete which means we need a WAIT_FOR_ME anyway.
    */
   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

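   /* CP_DRAW_AUTO reads the byte counter from the streamout offset buffer;
    * the CP derives the vertex count as (counter - byte offset) / stride:
    */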
   OUT_PKT7(ring, CP_DRAW_AUTO, 6);
   OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
   OUT_RING(ring, info->instance_count);
   OUT_RELOC(ring, offset->bo, 0, 0, 0);
   OUT_RING(ring, 0); /* byte counter offset subtracted from the value read from above */
   OUT_RING(ring, target->stride);
}

static void
draw_emit_indirect(struct fd_ringbuffer *ring,
                   struct CP_DRAW_INDX_OFFSET_0 *draw0,
                   const struct pipe_draw_info *info,
                   const struct pipe_draw_indirect_info *indirect,
                   unsigned index_offset)
{
   struct fd_resource *ind = fd_resource(indirect->buffer);

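   /* For indexed indirect draws, the size of the index buffer is baked
    * into the packet (max_indices), seemingly so the HW can bound index
    * fetches for whatever count the indirect buffer supplies:
    */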
   if (info->index_size) {
      struct pipe_resource *idx = info->index.resource;
      unsigned max_indices = (idx->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(fd_resource(idx)->bo,
                                                   index_offset),
              A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
              A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   } else {
      OUT_PKT(ring, CP_DRAW_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   }
}

static void
draw_emit(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
          const struct pipe_draw_info *info,
          const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
{
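   /* Direct draws: indexed draws need the index buffer address and its
    * size in the packet; non-indexed draws only carry the counts:
    */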
   if (info->index_size) {
      assert(!info->has_user_indices);

      struct pipe_resource *idx_buffer = info->index.resource;
      unsigned max_indices =
         (idx_buffer->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count),
              CP_DRAW_INDX_OFFSET_3(.first_indx = draw->start),
              A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(fd_resource(idx_buffer)->bo,
                                                 index_offset),
              A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices));
   } else {
      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count));
   }
}

static void
fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit) assert_dt
{
   if (ctx->last.dirty ||
       (ctx->last.primitive_restart != emit->primitive_restart)) {
      /* rasterizer state is affected by primitive-restart: */
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);
      ctx->last.primitive_restart = emit->primitive_restart;
   }
}

static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draw,
             unsigned index_offset) assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
   struct fd6_emit emit = {
      .ctx = ctx,
      .vtx = &ctx->vtx,
      .info = info,
      .drawid_offset = drawid_offset,
      .indirect = indirect,
      .draw = draw,
      .key = {
         .vs = ctx->prog.vs,
         .gs = ctx->prog.gs,
         .fs = ctx->prog.fs,
         .key = {
            .rasterflat = ctx->rasterizer->flatshade,
            .layer_zero = !gs_info || !(gs_info->outputs_written & VARYING_BIT_LAYER),
            .sample_shading = (ctx->min_samples > 1),
            .msaa = (ctx->framebuffer.samples > 1),
         },
         .clip_plane_enable = ctx->rasterizer->clip_plane_enable,
      },
      .rasterflat = ctx->rasterizer->flatshade,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
      .primitive_restart = info->primitive_restart && info->index_size,
      .patch_vertices = ctx->patch_vertices,
   };

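   /* Without both a VS and an FS bound there is nothing sensible to
    * draw, so bail:
    */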
   if (!(ctx->prog.vs && ctx->prog.fs))
      return false;

   if (info->mode == PIPE_PRIM_PATCHES) {
      emit.key.hs = ctx->prog.hs;
      emit.key.ds = ctx->prog.ds;

      if (!(ctx->prog.hs && ctx->prog.ds))
         return false;

      struct shader_info *ds_info = ir3_get_shader_info(emit.key.ds);
      emit.key.key.tessellation = ir3_tess_mode(ds_info->tess.primitive_mode);
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);

      struct shader_info *fs_info = ir3_get_shader_info(emit.key.fs);
      emit.key.key.tcs_store_primid =
         BITSET_TEST(ds_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID) ||
         (gs_info && BITSET_TEST(gs_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID)) ||
         (fs_info && (fs_info->inputs_read & (1ull << VARYING_SLOT_PRIMITIVE_ID)));
   }

   if (emit.key.gs) {
      emit.key.key.has_gs = true;
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
   }

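   /* Visibility stream sizes can only be estimated for direct draws
    * without tess/GS, where the vertex count is known on the CPU:
    */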
   if (!(emit.key.hs || emit.key.ds || emit.key.gs || indirect))
      fd6_vsc_update_sizes(ctx->batch, info, draw);

   ir3_fixup_shader_state(&ctx->base, &emit.key.key);

   if (!(ctx->gen_dirty & BIT(FD6_GROUP_PROG))) {
      emit.prog = fd6_ctx->prog;
   } else {
      fd6_ctx->prog = fd6_emit_get_prog(&emit);
   }

   /* bail if compile failed: */
   if (!fd6_ctx->prog)
      return false;

   fixup_draw_state(ctx, &emit);

   /* *after* ir3_fixup_shader_state(): */
   emit.dirty = ctx->dirty;
   emit.dirty_groups = ctx->gen_dirty;

   emit.bs = fd6_emit_get_prog(&emit)->bs;
   emit.vs = fd6_emit_get_prog(&emit)->vs;
   emit.hs = fd6_emit_get_prog(&emit)->hs;
   emit.ds = fd6_emit_get_prog(&emit)->ds;
   emit.gs = fd6_emit_get_prog(&emit)->gs;
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   if (emit.vs->need_driver_params || fd6_ctx->has_dp_state)
      emit.dirty_groups |= BIT(FD6_GROUP_VS_DRIVER_PARAMS);

   /* If we are doing xfb, we need to emit the xfb state on every draw: */
   if (emit.prog->stream_output)
      emit.dirty_groups |= BIT(FD6_GROUP_SO);

   if (unlikely(ctx->stats_users > 0)) {
      ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
      ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
      ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
      ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
      ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
   }

   struct fd_ringbuffer *ring = ctx->batch->draw;

   struct CP_DRAW_INDX_OFFSET_0 draw0 = {
      .prim_type = ctx->screen->primtypes[info->mode],
      .vis_cull = USE_VISIBILITY,
      .gs_enable = !!emit.key.gs,
   };

   if (indirect && indirect->count_from_stream_output) {
      draw0.source_select = DI_SRC_SEL_AUTO_XFB;
   } else if (info->index_size) {
      draw0.source_select = DI_SRC_SEL_DMA;
      draw0.index_size = fd4_size2indextype(info->index_size);
   } else {
      draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
   }

   if (info->mode == PIPE_PRIM_PATCHES) {
      shader_info *ds_info = &emit.ds->shader->nir->info;
      uint32_t factor_stride;

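      /* factor_stride is the per-patch size, in bytes, of the tess-factor
       * data written for each output primitive type:
       */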
      switch (ds_info->tess.primitive_mode) {
      case GL_ISOLINES:
         draw0.patch_type = TESS_ISOLINES;
         factor_stride = 12;
         break;
      case GL_TRIANGLES:
         draw0.patch_type = TESS_TRIANGLES;
         factor_stride = 20;
         break;
      case GL_QUADS:
         draw0.patch_type = TESS_QUADS;
         factor_stride = 28;
         break;
      default:
         unreachable("bad tessmode");
      }

      draw0.prim_type = DI_PT_PATCHES0 + ctx->patch_vertices;
      draw0.tess_enable = true;

      const unsigned max_count = 2048;
      unsigned count;

      /* We can cap tessparam/tessfactor buffer sizes at the sub-draw
       * limit. But in the indirect-draw case we must assume the worst.
       */
      if (indirect && indirect->buffer) {
         count = ALIGN_NPOT(max_count, ctx->patch_vertices);
      } else {
         count = MIN2(max_count, draw->count);
         count = ALIGN_NPOT(count, ctx->patch_vertices);
      }

      OUT_PKT7(ring, CP_SET_SUBDRAW_SIZE, 1);
      OUT_RING(ring, count);

      ctx->batch->tessellation = true;
      ctx->batch->tessparam_size =
         MAX2(ctx->batch->tessparam_size, emit.hs->output_size * 4 * count);
      ctx->batch->tessfactor_size =
         MAX2(ctx->batch->tessfactor_size, factor_stride * count);

      if (!ctx->batch->tess_addrs_constobj) {
         /* Reserve space for the bo addresses - we'll write them later in
          * setup_tess_buffers(). We need two bo addresses, but indirect
          * constant upload needs at least 4 vec4s.
          */
         unsigned size = 4 * 16;

         ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
            ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);

         ctx->batch->tess_addrs_constobj->cur += size;
      }
   }

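   /* ctx->last caches the values most recently programmed into these
    * registers, letting unchanged values be skipped between draws:
    */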
   uint32_t index_start = info->index_size ? draw->index_bias : draw->start;
   if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
      OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
      ctx->last.index_start = index_start;
   }

   if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
      OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
      ctx->last.instance_start = info->start_instance;
   }

   uint32_t restart_index =
      info->primitive_restart ? info->restart_index : 0xffffffff;
   if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
      OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
      OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
      ctx->last.restart_index = restart_index;
   }

   // TODO move fd6_emit_streamout.. I think..
   if (emit.dirty_groups)
      fd6_emit_state(ring, &emit);

   /* For debug after a lockup, write a unique counter value to scratch7
    * for each draw, to make it easier to match up register dumps to
    * cmdstream. The combination of IB (scratch6) and DRAW is enough to
    * "triangulate" the particular draw that caused the lockup.
    */
   emit_marker6(ring, 7);

   if (indirect) {
      if (indirect->count_from_stream_output) {
         draw_emit_xfb(ring, &draw0, info, indirect);
      } else {
         draw_emit_indirect(ring, &draw0, info, indirect, index_offset);
      }
   } else {
      draw_emit(ring, &draw0, info, draw, index_offset);
   }

   emit_marker6(ring, 7);
   fd_reset_wfi(ctx->batch);

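   /* Flush each active streamout buffer after the draw so the hardware
    * updates the corresponding byte counters:
    */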
   if (emit.streamout_mask) {
      for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         if (emit.streamout_mask & (1 << i)) {
            fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
         }
      }
   }

   fd_context_all_clean(ctx);

   return true;
}

static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth) assert_dt
{
   struct fd_ringbuffer *ring;
   struct fd_screen *screen = batch->ctx->screen;

   ring = fd_batch_get_prologue(batch);

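   /* The LRZ buffer is cleared with a 2D solid-fill blit emitted in the
    * batch prologue, so it executes before any rendering in the batch:
    */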
   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_RB_CCU_CNTL(.color_offset = screen->ccu_offset_bypass));

   OUT_REG(ring,
           A6XX_HLSQ_INVALIDATE_CMD(.vs_state = true, .hs_state = true,
                                    .ds_state = true, .gs_state = true,
                                    .fs_state = true, .cs_state = true,
                                    .gfx_ibo = true, .cs_ibo = true,
                                    .gfx_shared_const = true,
                                    .gfx_bindless = 0x1f, .cs_bindless = 0x1f));

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

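   /* There is no source texture for a solid-fill blit, so zero out the
    * 2D src state:
    */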
   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
   OUT_RING(ring, 0x0000f410);

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring,
            A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd_wfi(batch, ring);

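   /* The "solid color" for the fill is the raw float bits of the depth
    * clear value; with an FMT6_16_UNORM destination the 2D engine
    * presumably converts it to the 16-bit LRZ representation on write:
    */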
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
                     A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                     A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
   OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_REG(ring, A6XX_GRAS_2D_SRC_TL_X(0), A6XX_GRAS_2D_SRC_BR_X(0),
           A6XX_GRAS_2D_SRC_TL_Y(0), A6XX_GRAS_2D_SRC_BR_Y(0));

   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) | A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
                     A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, screen->info->a6xx.magic.RB_UNKNOWN_8E04_blit);

   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
   fd_wfi(batch, ring);

   fd6_cache_inv(batch, ring);
}

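/* Used below to skip the LRZ fast-clear path for 32-bit depth formats: */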
static bool
is_z32(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
   case PIPE_FORMAT_Z32_UNORM:
   case PIPE_FORMAT_Z32_FLOAT:
      return true;
   default:
      return false;
   }
}

static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
          const union pipe_color_union *color, double depth,
          unsigned stencil) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
   const bool has_depth = pfb->zsbuf;
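   /* PIPE_CLEAR_COLOR0 is bit 2, so the shift yields a per-MRT mask of
    * color buffers to clear:
    */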
   unsigned color_buffers = buffers >> 2;

   /* We need to do multisample clears on the 3D pipe, so fall back to
    * u_blitter:
    */
   if (pfb->samples > 1)
      return false;

   /* If we're clearing after draws, fall back to 3D pipe clears. We could
    * use blitter clears in the draw batch but then we'd have to patch up
    * the gmem offsets. This doesn't seem like a useful thing to optimize
    * for, however.
    */
   if (ctx->batch->num_draws > 0)
      return false;

   u_foreach_bit (i, color_buffers)
      ctx->batch->clear_color[i] = *color;
   if (buffers & PIPE_CLEAR_DEPTH)
      ctx->batch->clear_depth = depth;
   if (buffers & PIPE_CLEAR_STENCIL)
      ctx->batch->clear_stencil = stencil;

   ctx->batch->fast_cleared |= buffers;

   if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
      struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
      if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
         zsbuf->lrz_valid = true;
         zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
         fd6_clear_lrz(ctx->batch, zsbuf, depth);
      }
   }

   return true;
}

void
fd6_draw_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->draw_vbo = fd6_draw_vbo;
   ctx->clear = fd6_clear;
}