1 /*
2  * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3  * Copyright © 2018 Google, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  *
24  * Authors:
25  *    Rob Clark <robclark@freedesktop.org>
26  */
27 
28 #include "pipe/p_state.h"
29 #include "util/u_memory.h"
30 #include "util/u_prim.h"
31 #include "util/u_string.h"
32 
33 #include "freedreno_resource.h"
34 #include "freedreno_state.h"
35 
36 #include "fd6_context.h"
37 #include "fd6_draw.h"
38 #include "fd6_emit.h"
39 #include "fd6_format.h"
40 #include "fd6_program.h"
41 #include "fd6_vsc.h"
42 #include "fd6_zsa.h"
43 
44 #include "fd6_pack.h"
45 
static void
draw_emit_xfb(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
              const struct pipe_draw_info *info,
              const struct pipe_draw_indirect_info *indirect)
{
   /* Emit a CP_DRAW_AUTO packet: the GPU derives the vertex count from the
    * stream-output (transform feedback) byte counter buffer instead of a
    * CPU-supplied count.
    */
   struct fd_stream_output_target *target =
      fd_stream_output_target(indirect->count_from_stream_output);
   struct fd_resource *offset = fd_resource(target->offset_buf);

   /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
    * Plus, for the common case where the counter buffer is written by
    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
    * complete which means we need a WAIT_FOR_ME anyway.
    */
   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   OUT_PKT7(ring, CP_DRAW_AUTO, 6);
   OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
   OUT_RING(ring, info->instance_count);
   OUT_RELOC(ring, offset->bo, 0, 0, 0); /* address of the byte counter */
   OUT_RING(
      ring,
      0); /* byte counter offset subtracted from the value read from above */
   OUT_RING(ring, target->stride);
}
71 
static void
draw_emit_indirect(struct fd_ringbuffer *ring,
                   struct CP_DRAW_INDX_OFFSET_0 *draw0,
                   const struct pipe_draw_info *info,
                   const struct pipe_draw_indirect_info *indirect,
                   unsigned index_offset)
{
   /* Emit an indirect draw: the draw parameters (count/instances/etc) are
    * read by the GPU from 'indirect->buffer' at 'indirect->offset'.
    */
   struct fd_resource *ind = fd_resource(indirect->buffer);

   if (info->index_size) {
      /* Indexed indirect draw; max_indices bounds the index fetch to what
       * actually fits in the index buffer past index_offset:
       */
      struct pipe_resource *idx = info->index.resource;
      unsigned max_indices = (idx->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(fd_resource(idx)->bo,
                                                   index_offset),
              A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
              A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   } else {
      /* Non-indexed indirect draw: */
      OUT_PKT(ring, CP_DRAW_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   }
}
95 
static void
draw_emit(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
          const struct pipe_draw_info *info,
          const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
{
   /* Emit a direct (CPU-supplied count) draw via CP_DRAW_INDX_OFFSET. */
   if (info->index_size) {
      /* User-pointer index buffers are expected to have been uploaded to a
       * real resource before we get here:
       */
      assert(!info->has_user_indices);

      struct pipe_resource *idx_buffer = info->index.resource;
      /* Bound the index fetch to what fits in the buffer past index_offset: */
      unsigned max_indices =
         (idx_buffer->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count),
              CP_DRAW_INDX_OFFSET_3(.first_indx = draw->start),
              A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(fd_resource(idx_buffer)->bo,
                                                 index_offset),
              A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices));
   } else {
      /* Non-indexed draw (start offset is handled via VFD_INDEX_OFFSET): */
      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count));
   }
}
121 
122 static void
fixup_draw_state(struct fd_context * ctx,struct fd6_emit * emit)123 fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit) assert_dt
124 {
125    if (ctx->last.dirty ||
126        (ctx->last.primitive_restart != emit->primitive_restart)) {
127       /* rasterizer state is effected by primitive-restart: */
128       fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);
129       ctx->last.primitive_restart = emit->primitive_restart;
130    }
131 }
132 
/* The ctx->draw_vbo entrypoint for a6xx: builds up the fd6_emit state
 * bundle, emits any changed state, and then emits the draw packet itself
 * (direct, indirect, or stream-output-driven).  Returns false to fall back
 * to the caller's error path (missing shaders or failed compile).
 */
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draw,
             unsigned index_offset) assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
   /* Gather everything the state-emit code needs into one struct: */
   struct fd6_emit emit = {
      .ctx = ctx,
      .vtx = &ctx->vtx,
      .info = info,
      .drawid_offset = drawid_offset,
      .indirect = indirect,
      .draw = draw,
      .key = {
         .vs = ctx->prog.vs,
         .gs = ctx->prog.gs,
         .fs = ctx->prog.fs,
         .key = {
            .rasterflat = ctx->rasterizer->flatshade,
            .layer_zero = !gs_info || !(gs_info->outputs_written & VARYING_BIT_LAYER),
            .sample_shading = (ctx->min_samples > 1),
            .msaa = (ctx->framebuffer.samples > 1),
         },
         .clip_plane_enable = ctx->rasterizer->clip_plane_enable,
      },
      .rasterflat = ctx->rasterizer->flatshade,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
      /* primitive-restart is only meaningful for indexed draws: */
      .primitive_restart = info->primitive_restart && info->index_size,
      .patch_vertices = ctx->patch_vertices,
   };

   /* VS and FS are mandatory: */
   if (!(ctx->prog.vs && ctx->prog.fs))
      return false;

   if (info->mode == PIPE_PRIM_PATCHES) {
      emit.key.hs = ctx->prog.hs;
      emit.key.ds = ctx->prog.ds;

      /* Tessellation requires both HS and DS: */
      if (!(ctx->prog.hs && ctx->prog.ds))
         return false;

      struct shader_info *ds_info = ir3_get_shader_info(emit.key.ds);
      emit.key.key.tessellation = ir3_tess_mode(ds_info->tess._primitive_mode);
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);

      /* TCS must store primitive-id if any downstream stage reads it: */
      struct shader_info *fs_info = ir3_get_shader_info(emit.key.fs);
      emit.key.key.tcs_store_primid =
         BITSET_TEST(ds_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID) ||
         (gs_info && BITSET_TEST(gs_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID)) ||
         (fs_info && (fs_info->inputs_read & (1ull << VARYING_SLOT_PRIMITIVE_ID)));
   }

   if (emit.key.gs) {
      emit.key.key.has_gs = true;
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
   }

   /* Visibility-stream size tracking only applies to plain direct draws: */
   if (!(emit.key.hs || emit.key.ds || emit.key.gs || indirect))
      fd6_vsc_update_sizes(ctx->batch, info, draw);

   ir3_fixup_shader_state(&ctx->base, &emit.key.key);

   /* Re-use the cached program state unless the PROG group is dirty: */
   if (!(ctx->gen_dirty & BIT(FD6_GROUP_PROG))) {
      emit.prog = fd6_ctx->prog;
   } else {
      fd6_ctx->prog = fd6_emit_get_prog(&emit);
   }

   /* bail if compile failed: */
   if (!fd6_ctx->prog)
      return false;

   fixup_draw_state(ctx, &emit);

   /* *after* fixup_shader_state(): */
   emit.dirty = ctx->dirty;
   emit.dirty_groups = ctx->gen_dirty;

   emit.bs = fd6_emit_get_prog(&emit)->bs;
   emit.vs = fd6_emit_get_prog(&emit)->vs;
   emit.hs = fd6_emit_get_prog(&emit)->hs;
   emit.ds = fd6_emit_get_prog(&emit)->ds;
   emit.gs = fd6_emit_get_prog(&emit)->gs;
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   if (emit.vs->need_driver_params || fd6_ctx->has_dp_state)
      emit.dirty_groups |= BIT(FD6_GROUP_VS_DRIVER_PARAMS);

   /* If we are doing xfb, we need to emit the xfb state on every draw: */
   if (emit.prog->stream_output)
      emit.dirty_groups |= BIT(FD6_GROUP_SO);

   /* Register-count statistics, only when someone is listening: */
   if (unlikely(ctx->stats_users > 0)) {
      ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
      ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
      ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
      ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
      ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
   }

   struct fd_ringbuffer *ring = ctx->batch->draw;

   /* First dword of the draw packet, shared by all draw flavors: */
   struct CP_DRAW_INDX_OFFSET_0 draw0 = {
      .prim_type = ctx->screen->primtypes[info->mode],
      .vis_cull = USE_VISIBILITY,
      .gs_enable = !!emit.key.gs,
   };

   if (indirect && indirect->count_from_stream_output) {
      draw0.source_select = DI_SRC_SEL_AUTO_XFB;
   } else if (info->index_size) {
      draw0.source_select = DI_SRC_SEL_DMA;
      draw0.index_size = fd4_size2indextype(info->index_size);
   } else {
      draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
   }

   if (info->mode == PIPE_PRIM_PATCHES) {
      uint32_t factor_stride = ir3_tess_factor_stride(emit.key.key.tessellation);

      /* IR3_TESS_* is the corresponding TESS_* plus one, so patch_type can
       * be derived by subtracting one:
       */
      STATIC_ASSERT(IR3_TESS_ISOLINES == TESS_ISOLINES + 1);
      STATIC_ASSERT(IR3_TESS_TRIANGLES == TESS_TRIANGLES + 1);
      STATIC_ASSERT(IR3_TESS_QUADS == TESS_QUADS + 1);
      draw0.patch_type = emit.key.key.tessellation - 1;

      draw0.prim_type = DI_PT_PATCHES0 + ctx->patch_vertices;
      draw0.tess_enable = true;

      /* maximum number of patches that can fit in tess factor/param buffers */
      uint32_t subdraw_size = MIN2(FD6_TESS_FACTOR_SIZE / factor_stride,
                                   FD6_TESS_PARAM_SIZE / (emit.hs->output_size * 4));
      /* convert from # of patches to draw count */
      subdraw_size *= ctx->patch_vertices;

      OUT_PKT7(ring, CP_SET_SUBDRAW_SIZE, 1);
      OUT_RING(ring, subdraw_size);

      ctx->batch->tessellation = true;
   }

   /* Emit the per-draw registers only when their values changed: */
   uint32_t index_start = info->index_size ? draw->index_bias : draw->start;
   if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
      OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
      ctx->last.index_start = index_start;
   }

   if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
      OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
      ctx->last.instance_start = info->start_instance;
   }

   uint32_t restart_index =
      info->primitive_restart ? info->restart_index : 0xffffffff;
   if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
      OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
      OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
      ctx->last.restart_index = restart_index;
   }

   // TODO move fd6_emit_streamout.. I think..
   if (emit.dirty_groups)
      fd6_emit_state(ring, &emit);

   /* for debug after a lock up, write a unique counter value
    * to scratch7 for each draw, to make it easier to match up
    * register dumps to cmdstream.  The combination of IB
    * (scratch6) and DRAW is enough to "triangulate" the
    * particular draw that caused lockup.
    */
   emit_marker6(ring, 7);

   if (indirect) {
      if (indirect->count_from_stream_output) {
         draw_emit_xfb(ring, &draw0, info, indirect);
      } else {
         draw_emit_indirect(ring, &draw0, info, indirect, index_offset);
      }
   } else {
      draw_emit(ring, &draw0, info, draw, index_offset);
   }

   emit_marker6(ring, 7);
   fd_reset_wfi(ctx->batch);

   /* Flush each active stream-output buffer so counters land in memory: */
   if (emit.streamout_mask) {
      struct fd_ringbuffer *ring = ctx->batch->draw;

      for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         if (emit.streamout_mask & (1 << i)) {
            fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
         }
      }
   }

   fd_context_all_clean(ctx);

   return true;
}
337 
/* Clear the LRZ (low-resolution Z) buffer of 'zsbuf' to 'depth' using the
 * 2D blitter in bypass mode.  The LRZ buffer is written as a linear
 * FMT6_16_UNORM destination via a solid-color CP_BLIT.  Emitted into the
 * batch prologue so it runs before any draws in the batch.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth) assert_dt
{
   struct fd_ringbuffer *ring;
   struct fd_screen *screen = batch->ctx->screen;

   ring = fd_batch_get_prologue(batch);

   /* Switch to bypass (sysmem) rendering mode: */
   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_RB_CCU_CNTL(.color_offset = screen->ccu_offset_bypass));

   /* Invalidate all stage state so nothing stale leaks into the blit: */
   OUT_REG(ring,
           A6XX_HLSQ_INVALIDATE_CMD(.vs_state = true, .hs_state = true,
                                    .ds_state = true, .gs_state = true,
                                    .fs_state = true, .cs_state = true,
                                    .gfx_ibo = true, .cs_ibo = true,
                                    .gfx_shared_const = true,
                                    .gfx_bindless = 0x1f, .cs_bindless = 0x1f));

   /* Switch to 2D-blit mode: */
   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

   /* No 2D source surface (solid-color fill): */
   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
   OUT_RING(ring, 0x0000f410);

   /* NOTE(review): 0x4f00080 is an undocumented magic bits pattern carried
    * alongside the color format in both 2D blit control registers.
    */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring,
            A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd_wfi(batch, ring);

   /* Solid fill color = the depth clear value: */
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   /* Destination surface = the LRZ buffer (linear, 16-bit unorm): */
   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
                     A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                     A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
   OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_REG(ring, A6XX_GRAS_2D_SRC_TL_X(0), A6XX_GRAS_2D_SRC_BR_X(0),
           A6XX_GRAS_2D_SRC_TL_Y(0), A6XX_GRAS_2D_SRC_BR_Y(0));

   /* Destination rect covers the whole LRZ surface: */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) | A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
                     A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, screen->info->a6xx.magic.RB_UNKNOWN_8E04_blit);

   /* Kick the actual blit: */
   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */

   /* Flush/invalidate caches so subsequent rendering sees the cleared LRZ: */
   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
   fd_wfi(batch, ring);

   fd6_cache_inv(batch, ring);
}
448 
449 static bool
is_z32(enum pipe_format format)450 is_z32(enum pipe_format format)
451 {
452    switch (format) {
453    case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
454    case PIPE_FORMAT_Z32_UNORM:
455    case PIPE_FORMAT_Z32_FLOAT:
456       return true;
457    default:
458       return false;
459    }
460 }
461 
462 static bool
fd6_clear(struct fd_context * ctx,unsigned buffers,const union pipe_color_union * color,double depth,unsigned stencil)463 fd6_clear(struct fd_context *ctx, unsigned buffers,
464           const union pipe_color_union *color, double depth,
465           unsigned stencil) assert_dt
466 {
467    struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
468    const bool has_depth = pfb->zsbuf;
469    unsigned color_buffers = buffers >> 2;
470 
471    /* we need to do multisample clear on 3d pipe, so fallback to u_blitter: */
472    if (pfb->samples > 1)
473       return false;
474 
475    /* If we're clearing after draws, fallback to 3D pipe clears.  We could
476     * use blitter clears in the draw batch but then we'd have to patch up the
477     * gmem offsets. This doesn't seem like a useful thing to optimize for
478     * however.*/
479    if (ctx->batch->num_draws > 0)
480       return false;
481 
482    u_foreach_bit (i, color_buffers)
483       ctx->batch->clear_color[i] = *color;
484    if (buffers & PIPE_CLEAR_DEPTH)
485       ctx->batch->clear_depth = depth;
486    if (buffers & PIPE_CLEAR_STENCIL)
487       ctx->batch->clear_stencil = stencil;
488 
489    ctx->batch->fast_cleared |= buffers;
490 
491    if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
492       struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
493       if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
494          zsbuf->lrz_valid = true;
495          zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
496          fd6_clear_lrz(ctx->batch, zsbuf, depth);
497       }
498    }
499 
500    return true;
501 }
502 
503 void
fd6_draw_init(struct pipe_context * pctx)504 fd6_draw_init(struct pipe_context *pctx) disable_thread_safety_analysis
505 {
506    struct fd_context *ctx = fd_context(pctx);
507    ctx->draw_vbo = fd6_draw_vbo;
508    ctx->clear = fd6_clear;
509 }
510