/*
 * Copyright (C) 2019-2020 Collabora, Ltd.
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <sys/poll.h>
#include <errno.h>

#include "pan_bo.h"
#include "pan_context.h"
#include "pan_minmax_cache.h"
#include "panfrost-quirks.h"

#include "util/macros.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_util.h"
#include "decode.h"
#include "util/pan_lower_framebuffer.h"

static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const struct pipe_scissor_state *scissor_state,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);

        if (!panfrost_render_condition_check(ctx))
                return;

        /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
         * the existing batch targeting this FBO has draws. We could probably
         * avoid that by replacing plain clears by quad-draws with a specific
         * color/depth/stencil value, thus avoiding the generation of extra
         * fragment jobs.
         */
        struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx, "Clear");
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}

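/* A variable point size only takes effect when the vertex shader actually
 * writes gl_PointSize and the active primitive type is points; otherwise
 * the fixed point size from the rasterizer state applies. */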
bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);

        return vs->info.vs.writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS;
}

/* The entire frame is in memory -- send it off to the kernel! */

void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(pipe->screen);

        /* Submit all pending jobs */
        panfrost_flush_all_batches(ctx, NULL);

        if (fence) {
                struct pipe_fence_handle *f = panfrost_fence_create(ctx);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = f;
        }

        if (dev->debug & PAN_DBG_TRACE)
                pandecode_next_frame();
}

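/* Texture barriers are implemented conservatively by flushing all pending
 * batches, so any batch that might have rendered to a bound texture is
 * submitted before subsequent draws sample from it. */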
static void
panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        panfrost_flush_all_batches(ctx, "Texture barrier");
}

static void
panfrost_set_frontend_noop(struct pipe_context *pipe, bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        panfrost_flush_all_batches(ctx, "Frontend no-op change");
        ctx->is_noop = enable;
}

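/* CSO types with no GPU-side resources attached (rasterizer, vertex
 * elements, samplers, depth-stencil-alpha, blend) share a single delete
 * callback that just frees the heap-allocated copy. */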
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
panfrost_bind_blend_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->blend = cso;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_blend_color(struct pipe_context *pipe,
                         const struct pipe_blend_color *blend_color)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;

        if (blend_color)
                ctx->blend_color = *blend_color;
}

/* Create a final blend for a render target, given the current context state.
 * Returns 0 when fixed-function blending suffices; otherwise returns the GPU
 * address of an uploaded blend shader, with the shader's first instruction
 * tag OR'd into the low bits. */

mali_ptr
panfrost_get_blend(struct panfrost_batch *batch, unsigned rti, struct panfrost_bo **bo, unsigned *shader_offset)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_blend_state *blend = ctx->blend;
        struct pan_blend_info info = blend->info[rti];
        struct pipe_surface *surf = batch->key.cbufs[rti];
        enum pipe_format fmt = surf->format;

        /* Use fixed-function if the equation permits, the format is blendable,
         * and no more than one unique constant is accessed */
        if (info.fixed_function && panfrost_blendable_formats_v7[fmt].internal &&
                        pan_blend_is_homogenous_constant(info.constant_mask,
                                ctx->blend_color.color)) {
                return 0;
        }

        /* Otherwise, we need to grab a shader */
        struct pan_blend_state pan_blend = blend->pan;
        unsigned nr_samples = surf->nr_samples ? : surf->texture->nr_samples;

        pan_blend.rts[rti].format = fmt;
        pan_blend.rts[rti].nr_samples = nr_samples;
        memcpy(pan_blend.constants, ctx->blend_color.color,
               sizeof(pan_blend.constants));

        /* Upload the shader, sharing a BO */
        if (!(*bo)) {
                *bo = panfrost_batch_create_bo(batch, 4096, PAN_BO_EXECUTE,
                                PIPE_SHADER_FRAGMENT, "Blend shader");
        }

        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        /* Default for Midgard */
        nir_alu_type col0_type = nir_type_float32;
        nir_alu_type col1_type = nir_type_float32;

        /* Bifrost has per-output types, respect them */
        if (pan_is_bifrost(dev)) {
                col0_type = ss->info.bifrost.blend[rti].type;
                col1_type = ss->info.bifrost.blend_src1_type;
        }

        pthread_mutex_lock(&dev->blend_shaders.lock);
        struct pan_blend_shader_variant *shader =
                pan_screen(ctx->base.screen)->vtbl.get_blend_shader(dev,
                                                                    &pan_blend,
                                                                    col0_type,
                                                                    col1_type,
                                                                    rti);

        /* Size check and upload */
        unsigned offset = *shader_offset;
        assert((offset + shader->binary.size) < 4096);
        memcpy((*bo)->ptr.cpu + offset, shader->binary.data, shader->binary.size);
        *shader_offset += shader->binary.size;
        pthread_mutex_unlock(&dev->blend_shaders.lock);

        return ((*bo)->ptr.gpu + offset) | shader->first_tag;
}

static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->rasterizer = hwcso;

        /* We can assume the renderer state descriptor is always dirty, the
         * dependencies are too intricate to bother tracking in detail. However
         * we could probably diff the renderers for viewport dirty tracking,
         * that just cares about the scissor enable and the depth clips. */
        ctx->dirty |= PAN_DIRTY_SCISSOR;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

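/* Bind count image views starting at start_slot, then unbind
 * unbind_num_trailing_slots more. AFBC-compressed resources are converted
 * to the uncompressed u-interleaved (tiled) modifier first, since image
 * access needs pixel-level granularity that AFBC does not provide. */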
static void
panfrost_set_shader_images(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned count, unsigned unbind_num_trailing_slots,
        const struct pipe_image_view *iviews)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_IMAGE;

        /* Unbind start_slot..start_slot+count+unbind_num_trailing_slots */
        if (!iviews) {
                for (int i = start_slot; i < start_slot + count + unbind_num_trailing_slots; i++) {
                        pipe_resource_reference(&ctx->images[shader][i].resource, NULL);
                }

                ctx->image_mask[shader] &= ~(((1ull << count) - 1) << start_slot);
                return;
        }

        /* Bind start_slot...start_slot+count */
        for (int i = 0; i < count; i++) {
                const struct pipe_image_view *image = &iviews[i];
                SET_BIT(ctx->image_mask[shader], 1 << (start_slot + i), image->resource);

                if (!image->resource) {
                        util_copy_image_view(&ctx->images[shader][start_slot+i], NULL);
                        continue;
                }

                struct panfrost_resource *rsrc = pan_resource(image->resource);

                /* Images don't work with AFBC, since they require pixel-level granularity */
                if (drm_is_afbc(rsrc->image.layout.modifier)) {
                        pan_resource_modifier_convert(ctx, rsrc,
                                        DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED,
                                        "Shader image");
                }

                util_copy_image_view(&ctx->images[shader][start_slot+i], image);
        }

        /* Unbind start_slot+count...start_slot+count+unbind_num_trailing_slots */
        for (int i = 0; i < unbind_num_trailing_slots; i++) {
                SET_BIT(ctx->image_mask[shader], 1 << (start_slot + count + i), NULL);
                util_copy_image_view(&ctx->images[shader][start_slot+count+i], NULL);
        }
}

static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->vertex = hwcso;
}

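/* Shader CSOs are specialized lazily: creation only records the IR (deep
 * copying TGSI tokens so the caller may free its copy), and compilation
 * happens per-variant at bind time. The PAN_DBG_PRECOMPILE path compiles
 * eagerly so shader-db can gather statistics without issuing a draw. */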
static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso,
        enum pipe_shader_type stage)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        struct panfrost_device *dev = pan_device(pctx->screen);
        so->base = *cso;

        simple_mtx_init(&so->lock, mtx_plain);

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        /* Precompile for shader-db if we need to */
        if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
                struct panfrost_context *ctx = pan_context(pctx);

                struct panfrost_shader_state state = { 0 };

                panfrost_shader_compile(pctx->screen,
                                        &ctx->shaders, &ctx->descs,
                                        PIPE_SHADER_IR_NIR,
                                        so->base.ir.nir,
                                        tgsi_processor_to_shader_stage(stage),
                                        &state);
        }

        return so;
}

static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (!cso->is_compute && cso->base.type == PIPE_SHADER_IR_NIR)
                ralloc_free(cso->base.ir.nir);

        if (cso->base.type == PIPE_SHADER_IR_TGSI)
                tgsi_free_tokens(cso->base.tokens);

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(shader_state->bin.bo);
                panfrost_bo_unreference(shader_state->state.bo);
                panfrost_bo_unreference(shader_state->linkage.bo);
        }

        simple_mtx_destroy(&cso->lock);

        free(cso->variants);
        free(so);
}

static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_SAMPLER;

        ctx->sampler_count[shader] = sampler ? num_sampler : 0;
        if (sampler)
                memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
}

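/* Fragment shader variants are keyed on the render target formats when the
 * shader reads back its outputs (e.g. for lowered blending), since the
 * lowering depends on the format. Formats the hardware can blend natively
 * are normalized to PIPE_FORMAT_NONE so they all share one variant. */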
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        if (variant->info.stage == MESA_SHADER_FRAGMENT &&
            variant->info.fs.outputs_read) {
                struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;

                unsigned i;
                BITSET_FOREACH_SET(i, &variant->info.fs.outputs_read, 8) {
                        enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                        if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                fmt = fb->cbufs[i]->format;

                        if (panfrost_blendable_formats_v6[fmt].internal)
                                fmt = PIPE_FORMAT_NONE;

                        if (variant->rt_formats[i] != fmt)
                                return false;
                }
        }

        if (variant->info.stage == MESA_SHADER_FRAGMENT &&
            variant->nr_cbufs != ctx->pipe_framebuffer.nr_cbufs)
                return false;

        /* Otherwise, we're good to go */
        return true;
}

/**
 * Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant.  So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */

static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {0};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);

        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}

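/* Bind a shader CSO, selecting (or creating and lazily compiling) the
 * variant matching the current context state. The variant array grows
 * geometrically, bounded by an arbitrary limit, and the whole
 * search/create/compile sequence runs under the CSO's lock since another
 * thread may bind the same CSO concurrently. */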
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->shader[type] = hwcso;

        ctx->dirty |= PAN_DIRTY_TLS_SIZE;
        ctx->dirty_shader[type] |= PAN_DIRTY_STAGE_RENDERER;

        if (!hwcso)
                return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        simple_mtx_lock(&variants->lock);

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;

                if (variants->variant_count > variants->variant_space) {
                        unsigned old_space = variants->variant_space;

                        variants->variant_space *= 2;
                        if (variants->variant_space == 0)
                                variants->variant_space = 1;

                        /* Arbitrary limit to stop runaway programs from
                         * creating an unbounded number of shader variants. */
                        assert(variants->variant_space < 1024);

                        unsigned msize = sizeof(struct panfrost_shader_state);
                        variants->variants = realloc(variants->variants,
                                                     variants->variant_space * msize);

                        memset(&variants->variants[old_space], 0,
                               (variants->variant_space - old_space) * msize);
                }

                struct panfrost_shader_state *v =
                                &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
                        v->nr_cbufs = fb->nr_cbufs;

                        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                                enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                                if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                        fmt = fb->cbufs[i]->format;

                                if (panfrost_blendable_formats_v6[fmt].internal)
                                        fmt = PIPE_FORMAT_NONE;

                                v->rt_formats[i] = fmt;
                        }
                }
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                panfrost_shader_compile(ctx->base.screen,
                                        &ctx->shaders, &ctx->descs,
                                        variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                        variants->base.ir.nir :
                                        variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type),
                                        shader_state);

                shader_state->compiled = true;

                /* Fixup the stream out information */
                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output,
                                       shader_state->info.outputs_written);
        }

        /* TODO: it would be more efficient to release the lock before
         * compiling instead of after, but that can race if thread A compiles a
         * variant while thread B searches for that same variant */
        simple_mtx_unlock(&variants->lock);
}

static void *
panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void *
panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        unsigned unbind_num_trailing_slots,
        bool take_ownership,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
                                     start_slot, num_buffers, unbind_num_trailing_slots,
                                     take_ownership);
}

static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index, bool take_ownership,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf, take_ownership);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_CONST;
}

static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = ref;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        unsigned unbind_num_trailing_slots,
        bool take_ownership,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_TEXTURE;

        unsigned new_nr = 0;
        unsigned i;

        assert(start_slot == 0);

        if (!views)
                num_views = 0;

        for (i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
                if (take_ownership) {
                        pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                                    NULL);
                        ctx->sampler_views[shader][i] = (struct panfrost_sampler_view *)views[i];
                } else {
                        pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                                    views[i]);
                }
        }

        for (; i < ctx->sampler_view_count[shader]; i++) {
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            NULL);
        }
        ctx->sampler_view_count[shader] = new_nr;
}

static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                        buffers, start, count);
}

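/* Changing the framebuffer invalidates the current batch pointer and the
 * cached render-target mask, and may require a different fragment shader
 * variant (variants are keyed on RT count and formats), hence the rebind
 * of the current FS at the end. */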
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
        ctx->batch = NULL;

        /* Hot draw call path needs the mask of active render targets */
        ctx->fb_rt_mask = 0;

        for (unsigned i = 0; i < ctx->pipe_framebuffer.nr_cbufs; ++i) {
                if (ctx->pipe_framebuffer.cbufs[i])
                        ctx->fb_rt_mask |= BITFIELD_BIT(i);
        }

        /* We may need to generate a new variant if the fragment shader is
         * keyed to the framebuffer format or render target count */
        struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT];

        if (fs && fs->variant_count)
                ctx->base.bind_fs_state(&ctx->base, fs);
}

static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->depth_stencil = cso;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->sample_mask = sample_mask;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_min_samples(struct pipe_context *pipe,
                         unsigned min_samples)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->min_samples = min_samples;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
        ctx->dirty |= PAN_DIRTY_VIEWPORT;
}

static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
        ctx->dirty |= PAN_DIRTY_SCISSOR;
}

static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_render_condition(struct pipe_context *pipe,
                          struct pipe_query *query,
                          bool condition,
                          enum pipe_render_cond_flag mode)
{
        struct panfrost_context *ctx = pan_context(pipe);

        ctx->cond_query = (struct panfrost_query *)query;
        ctx->cond_cond = condition;
        ctx->cond_mode = mode;
}

static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        _mesa_hash_table_destroy(panfrost->writers, NULL);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
        u_upload_destroy(pipe->stream_uploader);

        panfrost_pool_cleanup(&panfrost->descs);
        panfrost_pool_cleanup(&panfrost->shaders);

        ralloc_free(pipe);
}

static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}

static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        if (query->rsrc)
                pipe_resource_reference(&query->rsrc, NULL);

        ralloc_free(q);
}

static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                unsigned size = sizeof(uint64_t) * dev->core_count;

                /* Allocate a resource for the query results to be stored */
                if (!query->rsrc) {
                        query->rsrc = pipe_buffer_create(ctx->base.screen,
                                        PIPE_BIND_QUERY_BUFFER, 0, size);
                }

                /* Default to 0 if nothing at all is drawn. */
                uint8_t *zeroes = alloca(size);
                memset(zeroes, 0, size);
                pipe_buffer_write(pipe, query->rsrc, 0, size, zeroes);

                query->msaa = (ctx->pipe_framebuffer.samples > 1);
                ctx->occlusion_query = query;
                ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
                break;
        }

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders.. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                /* TODO: timestamp queries, etc? */
                break;
        }

        return true;
}

static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}

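/* Occlusion results are written by the GPU as one 64-bit counter per core,
 * so readback means flushing the writer, waiting on the BO, and summing.
 * On Midgard the counter appears to advance at 4x granularity when MSAA is
 * disabled, hence the divide by 4 below. */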
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_resource *rsrc = pan_resource(query->rsrc);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                panfrost_flush_writer(ctx, rsrc, "Occlusion query");
                panfrost_bo_wait(rsrc->image.data.bo, INT64_MAX, false);

                /* Read back the query results */
                uint64_t *result = (uint64_t *) rsrc->image.data.bo->ptr.cpu;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        uint64_t passed = 0;
                        for (int i = 0; i < dev->core_count; ++i)
                                passed += result[i];

                        if (!pan_is_bifrost(dev) && !query->msaa)
                                passed /= 4;

                        vresult->u64 = passed;
                } else {
                        vresult->b = !!result[0];
                }

                break;

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush_all_batches(ctx, "Primitive count query");
                vresult->u64 = query->end - query->start;
                break;

        default:
                /* TODO: more queries */
                break;
        }

        return true;
}

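/* Conditional rendering is evaluated on the CPU: read back the predicate
 * query (waiting unless one of the NO_WAIT modes was requested) and compare
 * it against the stored condition flag. Returns true if rendering should
 * proceed. */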
bool
panfrost_render_condition_check(struct panfrost_context *ctx)
{
        if (!ctx->cond_query)
                return true;

        perf_debug_ctx(ctx, "Implementing conditional rendering on the CPU");

        union pipe_query_result res = { 0 };
        bool wait =
                ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
                ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

        struct pipe_query *pq = (struct pipe_query *)ctx->cond_query;

        if (panfrost_get_query_result(&ctx->base, pq, wait, &res))
                return res.u64 != ctx->cond_cond;

        return true;
}

static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = &rzalloc(pctx, struct panfrost_streamout_target)->base;

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}

static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}

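/* Per Gallium semantics, an offset of (unsigned)-1 asks us to preserve the
 * target's current write offset (append semantics), so only explicit
 * offsets are recorded below. */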
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        pan_so_target(targets[i])->offset = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        /* Unbind any leftover targets past the new count */
        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}

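/* Create the Gallium context: wire up the state entry points, set up the
 * descriptor and shader upload pools, the blitter and the writer-tracking
 * table, and create a signalled syncobj that will track the last submitted
 * job. */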
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(screen);

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->texture_barrier = panfrost_texture_barrier;
        gallium->set_frontend_noop = panfrost_set_frontend_noop;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;
        gallium->set_shader_images = panfrost_set_shader_images;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->set_sampler_views = panfrost_set_sampler_views;

        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_fs_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_vs_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->bind_depth_stencil_alpha_state   = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_generic_cso_delete;

        gallium->set_sample_mask = panfrost_set_sample_mask;
        gallium->set_min_samples = panfrost_set_min_samples;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;
        gallium->render_condition = panfrost_render_condition;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;

        gallium->bind_blend_state   = panfrost_bind_blend_state;
        gallium->delete_blend_state = panfrost_generic_cso_delete;

        gallium->set_blend_color = panfrost_set_blend_color;

        pan_screen(screen)->vtbl.context_init(gallium);

        panfrost_resource_context_init(gallium);
        panfrost_compute_context_init(gallium);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;

        panfrost_pool_init(&ctx->descs, ctx, dev,
                        0, 4096, "Descriptors", true, false);

        panfrost_pool_init(&ctx->shaders, ctx, dev,
                        PAN_BO_EXECUTE, 4096, "Shaders", true, false);

        ctx->blitter = util_blitter_create(gallium);

        ctx->writers = _mesa_hash_table_create(gallium, _mesa_hash_pointer,
                                                        _mesa_key_pointer_equal);

        assert(ctx->blitter);

        /* Prepare for render! */

        /* By default mask everything on */
        ctx->sample_mask = ~0;
        ctx->active_queries = true;

        int ASSERTED ret;

        /* Create a syncobj in a signaled state. Will be updated to point to the
         * last queued job out_sync every time we submit a new job.
         */
        ret = drmSyncobjCreate(dev->fd, DRM_SYNCOBJ_CREATE_SIGNALED, &ctx->syncobj);
        assert(!ret && ctx->syncobj);

        return gallium;
}