1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/u_blitter.h"
26 #include "util/u_draw.h"
27 #include "util/u_prim.h"
28 #include "util/format/u_format.h"
29 #include "util/u_pack_color.h"
30 #include "util/u_split_draw.h"
31 #include "util/u_upload_mgr.h"
32
33 #include "vc4_context.h"
34 #include "vc4_resource.h"
35
36 #define VC4_HW_2116_COUNT 0x1ef0
37
38 static void
vc4_get_draw_cl_space(struct vc4_job * job,int vert_count)39 vc4_get_draw_cl_space(struct vc4_job *job, int vert_count)
40 {
41 /* The SW-5891 workaround may cause us to emit multiple shader recs
42 * and draw packets.
43 */
44 int num_draws = DIV_ROUND_UP(vert_count, 65535 - 2) + 1;
45
46 /* Binner gets our packet state -- vc4_emit.c contents,
47 * and the primitive itself.
48 */
49 cl_ensure_space(&job->bcl,
50 256 + (VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE +
51 VC4_PACKET_GL_SHADER_STATE_SIZE) * num_draws);
52
53 /* Nothing for rcl -- that's covered by vc4_context.c */
54
55 /* shader_rec gets up to 12 dwords of reloc handles plus a maximally
56 * sized shader_rec (104 bytes base for 8 vattrs plus 32 bytes of
57 * vattr stride).
58 */
59 cl_ensure_space(&job->shader_rec,
60 (12 * sizeof(uint32_t) + 104 + 8 * 32) * num_draws);
61
62 /* Uniforms are covered by vc4_write_uniforms(). */
63
64 /* There could be up to 16 textures per stage, plus misc other
65 * pointers.
66 */
67 cl_ensure_space(&job->bo_handles, (2 * 16 + 20) * sizeof(uint32_t));
68 cl_ensure_space(&job->bo_pointers,
69 (2 * 16 + 20) * sizeof(struct vc4_bo *));
70 }
71
/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        /* This setup only needs to happen once per job; needs_flush is set
         * below once it has been emitted.
         */
        if (job->needs_flush)
                return;

        /* Reserve the per-job baseline of CL space (vert_count of 0). */
        vc4_get_draw_cl_space(job, 0);

        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION, bin) {
                bin.width_in_tiles = job->draw_tiles_x;
                bin.height_in_tiles = job->draw_tiles_y;
                bin.multisample_mode_4x = job->msaa;
        }

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_emit(&job->bcl, START_TILE_BINNING, start);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_emit(&job->bcl, PRIMITIVE_LIST_FORMAT, list) {
                list.data_type = _16_BIT_INDEX;
                list.primitive_type = TRIANGLES_LIST;
        }

        /* Record that the job now has binning contents that must be flushed,
         * and the framebuffer dimensions it was set up against.
         */
        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;
}
112
113 static void
vc4_predraw_check_textures(struct pipe_context * pctx,struct vc4_texture_stateobj * stage_tex)114 vc4_predraw_check_textures(struct pipe_context *pctx,
115 struct vc4_texture_stateobj *stage_tex)
116 {
117 struct vc4_context *vc4 = vc4_context(pctx);
118
119 for (int i = 0; i < stage_tex->num_textures; i++) {
120 struct vc4_sampler_view *view =
121 vc4_sampler_view(stage_tex->textures[i]);
122 if (!view)
123 continue;
124
125 if (view->texture != view->base.texture)
126 vc4_update_shadow_baselevel_texture(pctx, &view->base);
127
128 vc4_flush_jobs_writing_resource(vc4, view->texture);
129 }
130 }
131
/* Emits the GL shader record (shader code addresses and vertex attribute
 * setup) plus the BCL GL_SHADER_STATE packet and the uniform streams for
 * all three stages, then updates the context's cached index bias and
 * max_index.
 */
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info,
                         const struct pipe_draw_start_count_bias *draws,
                         uint32_t extra_index_bias)
{
        struct vc4_job *job = vc4->job;
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);

        /* Emit the shader record.  3 relocs cover the FS/CS/VS code
         * addresses; the rest are one per attribute record.
         */
        cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);

        cl_emit(&job->shader_rec, SHADER_RECORD, rec) {
                rec.enable_clipping = true;

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_is_single_threaded =
                        !vc4->prog.fs->fs_threaded;

                /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
                rec.point_size_included_in_shaded_vertex_data =
                         (info->mode == PIPE_PRIM_POINTS &&
                          vc4->rasterizer->base.point_size_per_vertex);

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_number_of_varyings =
                        vc4->prog.fs->num_inputs;
                rec.fragment_shader_code_address =
                        cl_address(vc4->prog.fs->bo, 0);

                rec.coordinate_shader_attribute_array_select_bits =
                        vc4->prog.cs->vattrs_live;
                rec.coordinate_shader_total_attributes_size =
                        vc4->prog.cs->vattr_offsets[8];
                rec.coordinate_shader_code_address =
                        cl_address(vc4->prog.cs->bo, 0);

                rec.vertex_shader_attribute_array_select_bits =
                        vc4->prog.vs->vattrs_live;
                rec.vertex_shader_total_attributes_size =
                        vc4->prog.vs->vattr_offsets[8];
                rec.vertex_shader_code_address =
                        cl_address(vc4->prog.vs->bo, 0);
        };

        /* Emit one attribute record per vertex element, tracking the
         * largest index that stays in bounds of every vertex buffer.
         */
        uint32_t max_index = 0xffff;
        unsigned index_bias = info->index_size ? draws->index_bias : 0;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                /* Bake the index bias (plus the GFXH-515 rebase) into the
                 * attribute start address.
                 */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * (index_bias +
                                                 extra_index_bias));
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(rsc->bo, offset);
                        attr.number_of_bytes_minus_1 = elem_size - 1;
                        attr.stride = vb->stride;
                        attr.coordinate_shader_vpm_offset =
                                vc4->prog.cs->vattr_offsets[i];
                        attr.vertex_shader_vpm_offset =
                                vc4->prog.vs->vattr_offsets[i];
                }

                /* Instanced (stride == 0) attributes don't constrain
                 * max_index.
                 */
                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        /* If no attributes are bound, emit the dummy read the simulator
         * requires, backed by a throwaway scratch BO.
         */
        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(bo, 0);
                        attr.number_of_bytes_minus_1 = 16 - 1;
                        attr.stride = 0;
                        attr.coordinate_shader_vpm_offset = 0;
                        attr.vertex_shader_vpm_offset = 0;
                }

                /* The job's reloc keeps the BO alive; drop our reference. */
                vc4_bo_unreference(&bo);
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, shader_state) {
                /* Note that number of attributes == 0 in the packet means 8
                 * attributes.  This field also contains the offset into
                 * shader_rec.
                 */
                assert(vtx->num_elements <= 8);
                shader_state.number_of_attribute_arrays =
                        num_elements_emit & 0x7;
        }

        /* CS shares the vertex-stage constant buffers and textures. */
        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        /* Cache what we emitted so an unchanged follow-up draw can skip
         * re-emitting shader state.
         */
        vc4->last_index_bias = index_bias + extra_index_bias;
        vc4->max_index = max_index;
        job->shader_rec_count++;
}
256
257 /**
258 * HW-2116 workaround: Flush the batch before triggering the hardware state
259 * counter wraparound behavior.
260 *
261 * State updates are tracked by a global counter which increments at the first
262 * state update after a draw or a START_BINNING. Tiles can then have their
263 * state updated at draw time with a set of cheap checks for whether the
264 * state's copy of the global counter matches the global counter the last time
265 * that state was written to the tile.
266 *
267 * The state counters are relatively small and wrap around quickly, so you
268 * could get false negatives for needing to update a particular state in the
269 * tile. To avoid this, the hardware attempts to write all of the state in
270 * the tile at wraparound time. This apparently is broken, so we just flush
271 * everything before that behavior is triggered. A batch flush is sufficient
272 * to get our current contents drawn and reset the counters to 0.
273 *
274 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
275 * tiles with VC4_PACKET_RETURN_FROM_LIST.
276 */
277 static void
vc4_hw_2116_workaround(struct pipe_context * pctx,int vert_count)278 vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
279 {
280 struct vc4_context *vc4 = vc4_context(pctx);
281 struct vc4_job *job = vc4_get_job_for_fbo(vc4);
282
283 if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
284 perf_debug("Flushing batch due to HW-2116 workaround "
285 "(too many draw calls per scene\n");
286 vc4_job_submit(vc4, job);
287 }
288 }
289
/* The context's draw_vbo entry point: validates and emits one draw into the
 * current binning job, handling indexed and array paths, the HW-2116 and
 * GFXH-515/SW-5891 workarounds, and z/s/color resolve tracking.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draws,
             unsigned num_draws)
{
        /* We only handle a single draw below; let the helper split
         * multi-draws into individual calls back into this function.
         */
        if (num_draws > 1) {
                util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
                return;
        }

        if (!indirect && (!draws[0].count || !info->instance_count))
                return;

        struct vc4_context *vc4 = vc4_context(pctx);

        /* Trim incomplete primitives off the end; bail if nothing's left. */
        if (!indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
                return;

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, draws[0].count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        /* Make sure that the raster order flags haven't changed, which can
         * only be set at job granularity.
         */
        if (job->flags != vc4->rasterizer->tile_raster_order_flags) {
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        vc4_get_draw_cl_space(job, draws[0].count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        bool needs_drawarrays_shader_state = false;

        /* Re-emit shader state only if relevant dirty bits are set or the
         * index bias baked into the attribute addresses has changed.
         */
        unsigned index_bias = info->index_size ? draws->index_bias : 0;
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != index_bias) {
                if (info->index_size)
                        vc4_emit_gl_shader_state(vc4, info, draws, 0);
                else
                        /* Deferred: the drawarrays path may need to emit
                         * several shader states below for GFXH-515.
                         */
                        needs_drawarrays_shader_state = true;
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = draws[0].start * index_size;
                struct pipe_resource *prsc;
                if (info->index_size == 4) {
                        /* The HW only does 8/16-bit indices; demote 32-bit
                         * index buffers to a 16-bit shadow copy.
                         */
                        prsc = vc4_get_shadow_index_buffer(pctx, info,
                                                           offset,
                                                           draws[0].count, &offset);
                        index_size = 2;
                } else {
                        if (info->has_user_indices) {
                                /* Upload user-memory indices into a GPU BO. */
                                unsigned start_offset = draws[0].start * info->index_size;
                                prsc = NULL;
                                u_upload_data(vc4->uploader, start_offset,
                                              draws[0].count * index_size, 4,
                                              (char*)info->index.user + start_offset,
                                              &offset, &prsc);
                        } else {
                                prsc = info->index.resource;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                struct vc4_cl_out *bcl = cl_start(&job->bcl);

                /* The original design for the VC4 kernel UABI had multiple
                 * packets that used relocations in the BCL (some of which
                 * needed two BOs), but later modifications eliminated all but
                 * this one usage.  We have an arbitrary 32-bit offset value,
                 * and need to also supply an arbitrary 32-bit index buffer
                 * GEM handle, so we have this fake packet we emit in our BCL
                 * to be validated, which the kernel uses at validation time
                 * to perform the relocation in the IB packet (without
                 * emitting to the actual HW).
                 */
                uint32_t hindex = vc4_gem_hindex(job, rsc->bo);
                if (job->last_gem_handle_hindex != hindex) {
                        cl_u8(&bcl, VC4_PACKET_GEM_HANDLES);
                        cl_u32(&bcl, hindex);
                        cl_u32(&bcl, 0);
                        job->last_gem_handle_hindex = hindex;
                }

                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, draws[0].count);
                cl_u32(&bcl, offset);
                cl_u32(&bcl, vc4->max_index);

                cl_end(&job->bcl, bcl);
                job->draw_calls_queued++;

                /* Drop the reference to any temporary index BO we created
                 * above (shadow copy or user-index upload).
                 */
                if (info->index_size == 4 || info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = draws[0].count;
                uint32_t start = draws[0].start;
                uint32_t extra_index_bias = 0;
                static const uint32_t max_verts = 65535;

                /* GFXH-515 / SW-5891: The binner emits 16 bit indices for
                 * drawarrays, which means that if start + count > 64k it
                 * would truncate the top bits.  Work around this by emitting
                 * a limited number of primitives at a time and reemitting the
                 * shader state pointing farther down the vertex attribute
                 * arrays.
                 *
                 * To do this properly for line loops or trifans, we'd need to
                 * make a new VB containing the first vertex plus whatever
                 * remainder.
                 */
                if (start + count > max_verts) {
                        extra_index_bias = start;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step;

                        if (needs_drawarrays_shader_state) {
                                vc4_emit_gl_shader_state(vc4, info, draws,
                                                         extra_index_bias);
                        }

                        /* Clamp this_count to max_verts on a primitive
                         * boundary; step is how far to advance.
                         */
                        u_split_draw(info, max_verts, &this_count, &step);

                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMITIVES, array) {
                                array.primitive_mode = info->mode;
                                array.length = this_count;
                                array.index_of_first_vertex = start;
                        }
                        job->draw_calls_queued++;

                        /* Subsequent chunks restart at vertex 0 with the
                         * attribute arrays rebased by extra_index_bias.
                         */
                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }
        }

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        /* Track which z/s buffers this job writes and must resolve. */
        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth_enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        /* If we've used half of the presumably 256MB CMA area, flush the job
         * so that we don't accumulate a job that will end up not being
         * executable.
         */
        if (job->bo_space > 128 * 1024 * 1024)
                vc4_flush(pctx);

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
506
507 static uint32_t
pack_rgba(enum pipe_format format,const float * rgba)508 pack_rgba(enum pipe_format format, const float *rgba)
509 {
510 union util_color uc;
511 util_pack_color(rgba, format, &uc);
512 if (util_format_get_blocksize(format) == 2)
513 return uc.us;
514 else
515 return uc.ui[0];
516 }
517
/* The context's clear entry point: records tile-based fast clears in the
 * current job, falling back to a blitter quad for partial Z/S clears that
 * the combined depth+stencil tile clear can't express.
 */
static void
vc4_clear(struct pipe_context *pctx, unsigned buffers, const struct pipe_scissor_state *scissor_state,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);
                unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;

                /* Clearing ZS will clear both Z and stencil, so if we're
                 * trying to clear just one then we need to draw a quad to do
                 * it instead.  We need to do this before setting up
                 * tile-based clears in vc4->job, because the blitter may
                 * submit the current job.
                 */
                if ((zsclear == PIPE_CLEAR_DEPTH ||
                     zsclear == PIPE_CLEAR_STENCIL) &&
                    (rsc->initialized_buffers & ~(zsclear | job->cleared)) &&
                    util_format_is_depth_and_stencil(vc4->framebuffer.zsbuf->format)) {
                        static const union pipe_color_union dummy_color = {};

                        perf_debug("Partial clear of Z+stencil buffer, "
                                   "drawing a quad instead of fast clearing\n");
                        vc4_blitter_save(vc4);
                        util_blitter_clear(vc4->blitter,
                                           vc4->framebuffer.width,
                                           vc4->framebuffer.height,
                                           1,
                                           zsclear,
                                           &dummy_color, depth, stencil,
                                           false);
                        buffers &= ~zsclear;
                        if (!buffers)
                                return;
                        /* The blitter may have submitted our job; reacquire. */
                        job = vc4_get_job_for_fbo(vc4);
                }
        }

        /* We can't flag new buffers for clearing once we've queued draws.  We
         * could avoid this by using the 3d engine to clear.
         */
        if (job->draw_calls_queued) {
                perf_debug("Flushing rendering to process new clear.\n");
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        if (buffers & PIPE_CLEAR_COLOR0) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.cbufs[0]->texture);
                uint32_t clear_color;

                if (vc4_rt_format_is_565(vc4->framebuffer.cbufs[0]->format)) {
                        /* In 565 mode, the hardware will be packing our color
                         * for us.
                         */
                        clear_color = pack_rgba(PIPE_FORMAT_R8G8B8A8_UNORM,
                                                color->f);
                } else {
                        /* Otherwise, we need to do this packing because we
                         * support multiple swizzlings of RGBA8888.
                         */
                        clear_color =
                                pack_rgba(vc4->framebuffer.cbufs[0]->format,
                                          color->f);
                }
                /* Both clear_color words carry the same 32bpp value;
                 * presumably the second word serves wider tile-buffer
                 * modes -- TODO confirm against the HW docs.
                 */
                job->clear_color[0] = job->clear_color[1] = clear_color;
                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_COLOR0);
        }

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                /* Though the depth buffer is stored with Z in the high 24,
                 * for this field we just need to store it in the low 24.
                 */
                if (buffers & PIPE_CLEAR_DEPTH) {
                        job->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                                       depth);
                }
                if (buffers & PIPE_CLEAR_STENCIL)
                        job->clear_stencil = stencil;

                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
        }

        /* A tile-based clear touches the whole framebuffer. */
        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = vc4->framebuffer.width;
        job->draw_max_y = vc4->framebuffer.height;
        job->cleared |= buffers;
        job->resolve |= buffers;

        vc4_start_draw(vc4);
}
617
618 static void
vc4_clear_render_target(struct pipe_context * pctx,struct pipe_surface * ps,const union pipe_color_union * color,unsigned x,unsigned y,unsigned w,unsigned h,bool render_condition_enabled)619 vc4_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
620 const union pipe_color_union *color,
621 unsigned x, unsigned y, unsigned w, unsigned h,
622 bool render_condition_enabled)
623 {
624 fprintf(stderr, "unimpl: clear RT\n");
625 }
626
627 static void
vc4_clear_depth_stencil(struct pipe_context * pctx,struct pipe_surface * ps,unsigned buffers,double depth,unsigned stencil,unsigned x,unsigned y,unsigned w,unsigned h,bool render_condition_enabled)628 vc4_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
629 unsigned buffers, double depth, unsigned stencil,
630 unsigned x, unsigned y, unsigned w, unsigned h,
631 bool render_condition_enabled)
632 {
633 fprintf(stderr, "unimpl: clear DS\n");
634 }
635
636 void
vc4_draw_init(struct pipe_context * pctx)637 vc4_draw_init(struct pipe_context *pctx)
638 {
639 pctx->draw_vbo = vc4_draw_vbo;
640 pctx->clear = vc4_clear;
641 pctx->clear_render_target = vc4_clear_render_target;
642 pctx->clear_depth_stencil = vc4_clear_depth_stencil;
643 }
644