/*
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/macros.h"
#include "util/u_prim.h"
#include "util/u_vbuf.h"

#include "panfrost-quirks.h"

#include "pan_pool.h"
#include "pan_bo.h"
#include "pan_cmdstream.h"
#include "pan_context.h"
#include "pan_job.h"

/* If a BO is accessed for a particular shader stage, will it be in the primary
 * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
 * fragment will be primary, e.g. compute jobs will be considered
 * "vertex/tiler" by analogy */

static inline uint32_t
panfrost_bo_access_for_stage(enum pipe_shader_type stage)
{
        assert(stage == PIPE_SHADER_FRAGMENT ||
               stage == PIPE_SHADER_VERTEX ||
               stage == PIPE_SHADER_COMPUTE);

        return stage == PIPE_SHADER_FRAGMENT ?
               PAN_BO_ACCESS_FRAGMENT :
               PAN_BO_ACCESS_VERTEX_TILER;
}

static void
panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        unsigned shift = panfrost_get_stack_shift(batch->stack_size);
        struct mali_shared_memory shared = {
                .stack_shift = shift,
                .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
                .shared_workgroup_count = ~0,
        };
        postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
}

static void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
                               struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
}

static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
                              struct mali_vertex_tiler_prefix *prefix,
                              struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        postfix->gl_enables |= 0x7;
        SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
                rasterizer && rasterizer->base.front_ccw);
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
        SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
                rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
        SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
                rasterizer && rasterizer->base.flatshade_first);
}

void
panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
                                  struct mali_vertex_tiler_prefix *prefix,
                                  union midgard_primitive_size *primitive_size)
{
        struct panfrost_rasterizer *rasterizer = ctx->rasterizer;

        if (!panfrost_writes_point_size(ctx)) {
                bool points = prefix->draw_mode == MALI_POINTS;
                float val = 0.0f;

                if (rasterizer)
                        val = points ?
                              rasterizer->base.point_size :
                              rasterizer->base.line_width;

                primitive_size->constant = val;
        }
}

static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
                                   struct mali_vertex_tiler_postfix *postfix)
{
        SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
        if (ctx->occlusion_query) {
                postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
                panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_RW |
                                      PAN_BO_ACCESS_FRAGMENT);
        } else {
                postfix->occlusion_counter = 0;
        }
}

void
panfrost_vt_init(struct panfrost_context *ctx,
                 enum pipe_shader_type stage,
                 struct mali_vertex_tiler_prefix *prefix,
                 struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->shader[stage])
                return;

        memset(prefix, 0, sizeof(*prefix));
        memset(postfix, 0, sizeof(*postfix));

        if (device->quirks & IS_BIFROST) {
                postfix->gl_enables = 0x2;
                panfrost_vt_emit_shared_memory(ctx, postfix);
        } else {
                postfix->gl_enables = 0x6;
                panfrost_vt_attach_framebuffer(ctx, postfix);
        }

        if (stage == PIPE_SHADER_FRAGMENT) {
                panfrost_vt_update_occlusion_query(ctx, postfix);
                panfrost_vt_update_rasterizer(ctx, prefix, postfix);
        }
}

static unsigned
panfrost_translate_index_size(unsigned size)
{
        switch (size) {
        case 1:
                return MALI_DRAW_INDEXED_UINT8;

        case 2:
                return MALI_DRAW_INDEXED_UINT16;

        case 4:
                return MALI_DRAW_INDEXED_UINT32;

        default:
                unreachable("Invalid index size");
        }
}
/* Gets a GPU address for the associated index buffer. The address is only
 * guaranteed to be valid for the duration of the draw (transient), though it
 * may last longer. Also computes the bounds (min/max index) of the range
 * accessed by the draw. We do these operations together because there are
 * natural optimizations which require them to be together. */
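/* For resource-backed index buffers, a per-resource min/max cache is
 * consulted first, since computing the bounds otherwise requires a CPU-side
 * scan of the indices (the u_vbuf_get_minmax_index fallback below). */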

static mali_ptr
panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
                                  const struct pipe_draw_info *info,
                                  unsigned *min_index, unsigned *max_index)
{
        struct panfrost_resource *rsrc = pan_resource(info->index.resource);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        off_t offset = info->start * info->index_size;
        bool needs_indices = true;
        mali_ptr out = 0;

        if (info->max_index != ~0u) {
                *min_index = info->min_index;
                *max_index = info->max_index;
                needs_indices = false;
        }

        if (!info->has_user_indices) {
                /* Only resources can be directly mapped */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);
                out = rsrc->bo->gpu + offset;

                /* Check the cache */
                needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
                                                           info->start,
                                                           info->count,
                                                           min_index,
                                                           max_index);
        } else {
                /* Otherwise, we need to upload to transient memory */
                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
                                           info->count *
                                           info->index_size);
        }

        if (needs_indices) {
                /* Fallback */
                u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);

                if (!info->has_user_indices)
                        panfrost_minmax_cache_add(rsrc->index_cache,
                                                  info->start, info->count,
                                                  *min_index, *max_index);
        }

        return out;
}

void
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
                          const struct pipe_draw_info *info,
                          enum mali_draw_mode draw_mode,
                          struct mali_vertex_tiler_postfix *vertex_postfix,
                          struct mali_vertex_tiler_prefix *tiler_prefix,
                          struct mali_vertex_tiler_postfix *tiler_postfix,
                          unsigned *vertex_count,
                          unsigned *padded_count)
{
        tiler_prefix->draw_mode = draw_mode;

        unsigned draw_flags = 0;

        if (panfrost_writes_point_size(ctx))
                draw_flags |= MALI_DRAW_VARYING_SIZE;

        if (info->primitive_restart)
                draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
        /* These bits don't make much sense */

        draw_flags |= 0x3000;

        if (info->index_size) {
                unsigned min_index = 0, max_index = 0;

                tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
                                                                          info,
                                                                          &min_index,
                                                                          &max_index);

                /* Use the corresponding values */
                *vertex_count = max_index - min_index + 1;
                tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
                tiler_prefix->offset_bias_correction = -min_index;
                tiler_prefix->index_count = MALI_POSITIVE(info->count);
                draw_flags |= panfrost_translate_index_size(info->index_size);
        } else {
                tiler_prefix->indices = 0;
                *vertex_count = ctx->vertex_count;
                tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
                tiler_prefix->offset_bias_correction = 0;
                tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
        }

        tiler_prefix->unknown_draw = draw_flags;

        /* Encode the padded vertex count */

        if (info->instance_count > 1) {
                *padded_count = panfrost_padded_vertex_count(*vertex_count);

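                /* The hardware wants the padded count decomposed as an odd
                 * number times a power of two (padded = (2k + 1) << shift),
                 * which is the form panfrost_padded_vertex_count() returns,
                 * so recover shift via ctz and k from the remaining odd
                 * factor. E.g. padded = 12 = 3 << 2 gives shift = 2, k = 1 */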
                unsigned shift = __builtin_ctz(*padded_count);
                unsigned k = *padded_count >> (shift + 1);

                tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
        } else {
                *padded_count = *vertex_count;

                /* Reset instancing state */
                tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
                tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
        }
}

static void
panfrost_shader_meta_init(struct panfrost_context *ctx,
                          enum pipe_shader_type st,
                          struct mali_shader_meta *meta)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        memset(meta, 0, sizeof(*meta));
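        /* The shader pointer's low bits carry the first instruction tag, a
         * Midgard convention (presumably zero on Bifrost); shader binaries
         * are aligned, so OR'ing the tag into the address is safe */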
        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
        meta->attribute_count = ss->attribute_count;
        meta->varying_count = ss->varying_count;
        meta->texture_count = ctx->sampler_view_count[st];
        meta->sampler_count = ctx->sampler_count[st];

        if (dev->quirks & IS_BIFROST) {
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost1.unk1 = 0x800000;
                else {
                        /* First clause ATEST |= 0x4000000.
                         * Less than 32 regs |= 0x200 */
                        meta->bifrost1.unk1 = 0x950020;
                }

                meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
                if (st == PIPE_SHADER_VERTEX)
                        meta->bifrost2.preload_regs = 0xC0;
                else {
                        meta->bifrost2.preload_regs = 0x1;
                        SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
                }

                meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
        } else {
                meta->midgard1.uniform_count = MIN2(ss->uniform_count,
                                                    ss->uniform_cutoff);
                meta->midgard1.work_count = ss->work_reg_count;

                /* TODO: This is not conformant on ES3 */
                meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;

                meta->midgard1.flags_lo = 0x20;
                meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);

                SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
        }
}

static unsigned
panfrost_translate_compare_func(enum pipe_compare_func in)
{
        switch (in) {
        case PIPE_FUNC_NEVER:
                return MALI_FUNC_NEVER;

        case PIPE_FUNC_LESS:
                return MALI_FUNC_LESS;

        case PIPE_FUNC_EQUAL:
                return MALI_FUNC_EQUAL;

        case PIPE_FUNC_LEQUAL:
                return MALI_FUNC_LEQUAL;

        case PIPE_FUNC_GREATER:
                return MALI_FUNC_GREATER;

        case PIPE_FUNC_NOTEQUAL:
                return MALI_FUNC_NOTEQUAL;

        case PIPE_FUNC_GEQUAL:
                return MALI_FUNC_GEQUAL;

        case PIPE_FUNC_ALWAYS:
                return MALI_FUNC_ALWAYS;

        default:
                unreachable("Invalid func");
        }
}

static unsigned
panfrost_translate_stencil_op(enum pipe_stencil_op in)
{
        switch (in) {
        case PIPE_STENCIL_OP_KEEP:
                return MALI_STENCIL_KEEP;

        case PIPE_STENCIL_OP_ZERO:
                return MALI_STENCIL_ZERO;

        case PIPE_STENCIL_OP_REPLACE:
                return MALI_STENCIL_REPLACE;

        case PIPE_STENCIL_OP_INCR:
                return MALI_STENCIL_INCR;

        case PIPE_STENCIL_OP_DECR:
                return MALI_STENCIL_DECR;

        case PIPE_STENCIL_OP_INCR_WRAP:
                return MALI_STENCIL_INCR_WRAP;

        case PIPE_STENCIL_OP_DECR_WRAP:
                return MALI_STENCIL_DECR_WRAP;

        case PIPE_STENCIL_OP_INVERT:
                return MALI_STENCIL_INVERT;

        default:
                unreachable("Invalid stencil op");
        }
}

static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
        switch (w) {
        case PIPE_TEX_WRAP_REPEAT:
                return MALI_WRAP_REPEAT;

        case PIPE_TEX_WRAP_CLAMP:
                return MALI_WRAP_CLAMP;

        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return MALI_WRAP_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return MALI_WRAP_CLAMP_TO_BORDER;

        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return MALI_WRAP_MIRRORED_REPEAT;

        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return MALI_WRAP_MIRRORED_CLAMP;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;

        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;

        default:
                unreachable("Invalid wrap");
        }
}

void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
                                struct mali_sampler_descriptor *hw)
{
        unsigned func = panfrost_translate_compare_func(cso->compare_func);
        bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
        bool mip_linear  = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
        unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
        unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
        unsigned mip_filter = mip_linear  ?
                              (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
        unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;

        *hw = (struct mali_sampler_descriptor) {
                .filter_mode = min_filter | mag_filter | mip_filter |
                               normalized,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .compare_func = cso->compare_mode ?
                        panfrost_flip_compare_func(func) :
                        MALI_FUNC_NEVER,
                .border_color = {
                        cso->border_color.f[0],
                        cso->border_color.f[1],
                        cso->border_color.f[2],
                        cso->border_color.f[3]
                },
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
                .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
                .seamless_cube_map = cso->seamless_cube_map,
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
}

void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
                                        struct bifrost_sampler_descriptor *hw)
{
        *hw = (struct bifrost_sampler_descriptor) {
                .unk1 = 0x1,
                .wrap_s = translate_tex_wrap(cso->wrap_s),
                .wrap_t = translate_tex_wrap(cso->wrap_t),
                .wrap_r = translate_tex_wrap(cso->wrap_r),
                .unk8 = 0x8,
                .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
                .norm_coords = cso->normalized_coords,
                .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
                .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
                .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
                .max_lod = FIXED_16(cso->max_lod, false),
        };

        /* If necessary, we disable mipmapping in the sampler descriptor by
         * clamping the LOD as tight as possible (from 0 to epsilon,
         * essentially -- remember these are fixed point numbers, so
         * epsilon=1/256) */

        if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
                hw->max_lod = hw->min_lod + 1;
}

static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in,
                            struct mali_stencil_test *out)
{
        out->ref = 0; /* Gallium gets it from elsewhere */

        out->mask = in->valuemask;
        out->func = panfrost_translate_compare_func(in->func);
        out->sfail = panfrost_translate_stencil_op(in->fail_op);
        out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
        out->dppass = panfrost_translate_stencil_op(in->zpass_op);
}

static void
panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
                                     struct mali_shader_meta *fragmeta)
{
        if (!ctx->rasterizer) {
                SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
                SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
                fragmeta->depth_units = 0.0f;
                fragmeta->depth_factor = 0.0f;
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
                SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
                return;
        }

        struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;

        bool msaa = rast->multisample;

        /* TODO: Sample size */
        SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
        SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);

        struct panfrost_shader_state *fs;
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        /* EXT_shader_framebuffer_fetch requires the shader to be run
         * per-sample when outputs are read. */
        bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
        SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);

        fragmeta->depth_units = rast->offset_units * 2.0f;
        fragmeta->depth_factor = rast->offset_scale;
        /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */

        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
        SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);

        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
        SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
}

static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
                              struct mali_shader_meta *fragmeta)
{
        const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
        int zfunc = PIPE_FUNC_ALWAYS;

        if (!zsa) {
                struct pipe_stencil_state default_stencil = {
                        .enabled = 0,
                        .func = PIPE_FUNC_ALWAYS,
                        .fail_op = MALI_STENCIL_KEEP,
                        .zfail_op = MALI_STENCIL_KEEP,
                        .zpass_op = MALI_STENCIL_KEEP,
                        .writemask = 0xFF,
                        .valuemask = 0xFF
                };

                panfrost_make_stencil_state(&default_stencil,
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = default_stencil.writemask;
                fragmeta->stencil_back = fragmeta->stencil_front;
                fragmeta->stencil_mask_back = default_stencil.writemask;
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
        } else {
                SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
                        zsa->stencil[0].enabled);
                panfrost_make_stencil_state(&zsa->stencil[0],
                                            &fragmeta->stencil_front);
                fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
                fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];

                /* If back-stencil is not enabled, use the front values */

                if (zsa->stencil[1].enabled) {
                        panfrost_make_stencil_state(&zsa->stencil[1],
                                                    &fragmeta->stencil_back);
                        fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
                        fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
                } else {
                        fragmeta->stencil_back = fragmeta->stencil_front;
                        fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
                        fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
                }

                if (zsa->depth.enabled)
                        zfunc = zsa->depth.func;

                /* Depth state (TODO: Refactor) */

                SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
                        zsa->depth.writemask);
        }

        fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
        fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}

static bool
panfrost_fs_required(
                struct panfrost_shader_state *fs,
                struct panfrost_blend_final *blend,
                unsigned rt_count)
{
        /* If we generally have side effects */
        if (fs->fs_sidefx)
                return true;

        /* If colour is written we need to execute */
        for (unsigned i = 0; i < rt_count; ++i) {
                if (!blend[i].no_colour)
                        return true;
        }

        /* If depth is written and not implied we need to execute.
         * TODO: Predicate on Z/S writes being enabled */
        return (fs->writes_depth || fs->writes_stencil);
}

static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                void *rts)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
                (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
                !ctx->blend->base.dither);

        SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
                ctx->blend->base.alpha_to_coverage);

        /* Get blending setup */
        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);

        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
        unsigned shader_offset = 0;
        struct panfrost_bo *shader_bo = NULL;

        for (unsigned c = 0; c < rt_count; ++c)
                blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
                                                          &shader_offset);

        /* Disable shader execution if we can */
        if (dev->quirks & MIDGARD_SHADERLESS
                        && !panfrost_fs_required(fs, blend, rt_count)) {
                fragmeta->shader = 0;
                fragmeta->attribute_count = 0;
                fragmeta->varying_count = 0;
                fragmeta->texture_count = 0;
                fragmeta->sampler_count = 0;

                /* This feature is not known to work on Bifrost */
                fragmeta->midgard1.work_count = 1;
                fragmeta->midgard1.uniform_count = 0;
                fragmeta->midgard1.uniform_buffer_count = 0;
        }

        /* If there is a blend shader, work registers are shared. We impose 8
         * work registers as a limit for blend shaders. Should be lower XXX */

        if (!(dev->quirks & IS_BIFROST)) {
                for (unsigned c = 0; c < rt_count; ++c) {
                        if (blend[c].is_shader) {
                                fragmeta->midgard1.work_count =
                                        MAX2(fragmeta->midgard1.work_count, 8);
                        }
                }
        }

        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
         * copied to the blend_meta appended (by convention), but this is the
         * field actually read by the hardware. (Or maybe both are read...?).
         * Specify the last RTi with a blend shader. */

        fragmeta->blend.shader = 0;

        for (signed rt = (rt_count - 1); rt >= 0; --rt) {
                if (!blend[rt].is_shader)
                        continue;

                fragmeta->blend.shader = blend[rt].shader.gpu |
                                         blend[rt].shader.first_tag;
                break;
        }

        if (dev->quirks & MIDGARD_SFBD) {
                /* On platforms with only a single render target (SFBD), the
                 * blend information lives inside the shader meta itself. We
                 * additionally need to signal CAN_DISCARD for nontrivial
                 * blend modes (so we're able to read back the destination
                 * buffer) */

                SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                        blend[0].is_shader);

                if (!blend[0].is_shader) {
                        fragmeta->blend.equation = *blend[0].equation.equation;
                        fragmeta->blend.constant = blend[0].equation.constant;
                }

                SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                        !blend[0].no_blending || fs->can_discard);

                batch->draws |= PIPE_CLEAR_COLOR0;
                return;
        }

        if (dev->quirks & IS_BIFROST) {
                bool no_blend = true;

                for (unsigned i = 0; i < rt_count; ++i)
                        no_blend &= (blend[i].no_blending | blend[i].no_colour);

                SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
                        !fs->can_discard && !fs->writes_depth && no_blend);
        }

        /* Additional blend descriptor tacked on for jobs using MFBD */

        for (unsigned i = 0; i < rt_count; ++i) {
                unsigned flags = 0;

                if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
                        flags = 0x200;
                        batch->draws |= (PIPE_CLEAR_COLOR0 << i);

                        bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
                                       (ctx->pipe_framebuffer.cbufs[i]) &&
                                       util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);

                        SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
                        SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
                        SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
                        SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
                }

                if (dev->quirks & IS_BIFROST) {
                        struct bifrost_blend_rt *brts = rts;

                        brts[i].flags = flags;

                        if (blend[i].is_shader) {
                                /* The blend shader's address needs to be at
                                 * the same top 32 bit as the fragment shader.
                                 * TODO: Ensure that's always the case.
                                 */
                                assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
                                       (fs->bo->gpu & (0xffffffffull << 32)));
                                brts[i].shader = blend[i].shader.gpu;
                                brts[i].unk2 = 0x0;
                        } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
                                enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                                const struct util_format_description *format_desc;
                                format_desc = util_format_description(format);

                                brts[i].equation = *blend[i].equation.equation;

                                /* TODO: this is a bit more complicated */
                                brts[i].constant = blend[i].equation.constant;

                                brts[i].format = panfrost_format_to_bifrost_blend(format_desc);

                                /* 0x19 disables blending and forces REPLACE
                                 * mode (equivalent to rgb_mode = alpha_mode =
                                 * x122, colour mask = 0xF). 0x1a allows
                                 * blending. */
                                brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;

                                brts[i].shader_type = fs->blend_types[i];
                        } else {
                                /* Dummy attachment for depth-only */
                                brts[i].unk2 = 0x3;
                                brts[i].shader_type = fs->blend_types[i];
                        }
                } else {
                        struct midgard_blend_rt *mrts = rts;
                        mrts[i].flags = flags;

                        if (blend[i].is_shader) {
                                mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
                        } else {
                                mrts[i].blend.equation = *blend[i].equation.equation;
                                mrts[i].blend.constant = blend[i].equation.constant;
                        }
                }
        }
}

static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
                               struct mali_shader_meta *fragmeta,
                               void *rts)
{
        const struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *fs;

        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
        fragmeta->coverage_mask = (msaa ? ctx->sample_mask : ~0) & 0xF;

        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
        fragmeta->unknown2_4 = 0x4e0;

        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
         * is required (independent of 32-bit/64-bit descriptors), or why it's
         * not used on later GPU revisions. Otherwise, all shader jobs fault on
         * these earlier chips (perhaps this is a chicken bit of some kind).
         * More investigation is needed. */

        SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);

        if (dev->quirks & IS_BIFROST) {
                /* TODO */
        } else {
                /* Depending on whether it's legal in the given shader, we try
                 * to enable early-z testing. TODO: respect e-z force */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
                        !fs->can_discard && !fs->writes_global &&
                        !fs->writes_depth && !fs->writes_stencil &&
                        !ctx->blend->base.alpha_to_coverage);

                /* Add the writes Z/S flags if needed. */
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
                SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);

                /* Any time texturing is used, derivatives are implicitly calculated,
                 * so we need to enable helper invocations */

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
                        fs->helper_invocations);

                /* If discard is enabled, which bit we set to convey this
                 * depends on if depth/stencil is used for the draw or not.
                 * Just one of depth OR stencil is enough to trigger this. */

                const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
                bool zs_enabled = fs->writes_depth || fs->writes_stencil;

                if (zsa) {
                        zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
                        zs_enabled |= zsa->stencil[0].enabled;
                }

                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
                        fs->outputs_read || (!zs_enabled && fs->can_discard));
                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
        }

        panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
        panfrost_frag_meta_zsa_update(ctx, fragmeta);
        panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
}

void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
                          enum pipe_shader_type st,
                          struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);

        if (!ss) {
                postfix->shader = 0;
                return;
        }

        struct mali_shader_meta meta;

        panfrost_shader_meta_init(ctx, st, &meta);

        /* Add the shader BO to the batch. */
        panfrost_batch_add_bo(batch, ss->bo,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        mali_ptr shader_ptr;

        if (st == PIPE_SHADER_FRAGMENT) {
                struct panfrost_device *dev = pan_device(ctx->base.screen);
                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
                size_t desc_size = sizeof(meta);
                void *rts = NULL;
                struct panfrost_transfer xfer;
                unsigned rt_size;

                if (dev->quirks & MIDGARD_SFBD)
                        rt_size = 0;
                else if (dev->quirks & IS_BIFROST)
                        rt_size = sizeof(struct bifrost_blend_rt);
                else
                        rt_size = sizeof(struct midgard_blend_rt);

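                /* SFBD has no per-RT blend descriptors (blend state lives in
                 * the shader meta itself); on MFBD, one descriptor per render
                 * target is appended right after the shader meta below */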
                desc_size += rt_size * rt_count;

                if (rt_size)
                        rts = rzalloc_size(ctx, rt_size * rt_count);

                panfrost_frag_shader_meta_init(ctx, &meta, rts);

                xfer = panfrost_pool_alloc(&batch->pool, desc_size);

                memcpy(xfer.cpu, &meta, sizeof(meta));
                memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);

                if (rt_size)
                        ralloc_free(rts);

                shader_ptr = xfer.gpu;
        } else {
                shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
                                                  sizeof(meta));
        }

        postfix->shader = shader_ptr;
}

static void
panfrost_mali_viewport_init(struct panfrost_context *ctx,
                            struct mali_viewport *mvp)
{
        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;

        /* Clip bounds are encoded as floats. The viewport itself is encoded as
         * (somewhat) asymmetric ints. */

        const struct pipe_scissor_state *ss = &ctx->scissor;

        memset(mvp, 0, sizeof(*mvp));

        /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
         * each direction. Clipping to the viewport in theory should work, but
         * in practice causes issues when we're not explicitly trying to
         * scissor */

        *mvp = (struct mali_viewport) {
                .clip_minx = -INFINITY,
                .clip_miny = -INFINITY,
                .clip_maxx = INFINITY,
                .clip_maxy = INFINITY,
        };

        /* Always scissor to the viewport by default. */
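        /* Gallium's viewport transform maps NDC through
         * (x * scale + translate), so translate +/- |scale| recovers the
         * window-space bounds of each axis */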
        float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
        float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));

        float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
        float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));

        float minz = (vp->translate[2] - fabsf(vp->scale[2]));
        float maxz = (vp->translate[2] + fabsf(vp->scale[2]));

        /* Apply the scissor test */

        unsigned minx, miny, maxx, maxy;

        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
                minx = MAX2(ss->minx, vp_minx);
                miny = MAX2(ss->miny, vp_miny);
                maxx = MIN2(ss->maxx, vp_maxx);
                maxy = MIN2(ss->maxy, vp_maxy);
        } else {
                minx = vp_minx;
                miny = vp_miny;
                maxx = vp_maxx;
                maxy = vp_maxy;
        }

        /* Hardware needs the min/max to be strictly ordered, so flip if we
         * need to. The viewport transformation in the vertex shader will
         * handle the negatives if we don't */

        if (miny > maxy) {
                unsigned temp = miny;
                miny = maxy;
                maxy = temp;
        }

        if (minx > maxx) {
                unsigned temp = minx;
                minx = maxx;
                maxx = temp;
        }

        if (minz > maxz) {
                float temp = minz;
                minz = maxz;
                maxz = temp;
        }

        /* Clamp to the framebuffer size as a last check */

        minx = MIN2(ctx->pipe_framebuffer.width, minx);
        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);

        miny = MIN2(ctx->pipe_framebuffer.height, miny);
        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);

        /* Upload */

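        /* viewport1 holds the inclusive maxima: MALI_POSITIVE encodes a
         * dimension as (value - 1), which is why panfrost_emit_viewport adds
         * 1 back when unioning the batch scissor */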
        mvp->viewport0[0] = minx;
        mvp->viewport1[0] = MALI_POSITIVE(maxx);

        mvp->viewport0[1] = miny;
        mvp->viewport1[1] = MALI_POSITIVE(maxy);

        bool clip_near = true;
        bool clip_far = true;

        if (ctx->rasterizer) {
                clip_near = ctx->rasterizer->base.depth_clip_near;
                clip_far = ctx->rasterizer->base.depth_clip_far;
        }

        mvp->clip_minz = clip_near ? minz : -INFINITY;
        mvp->clip_maxz = clip_far ? maxz : INFINITY;
}

void
panfrost_emit_viewport(struct panfrost_batch *batch,
                       struct mali_vertex_tiler_postfix *tiler_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct mali_viewport mvp;

        panfrost_mali_viewport_init(batch->ctx, &mvp);

        /* Update the job, unless we're doing wallpapering (whose lack of
         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
         * just... be faster :) */

        if (!ctx->wallpaper_batch)
                panfrost_batch_union_scissor(batch, mvp.viewport0[0],
                                             mvp.viewport0[1],
                                             mvp.viewport1[0] + 1,
                                             mvp.viewport1[1] + 1);

        tiler_postfix->viewport = panfrost_pool_upload(&batch->pool, &mvp,
                                                       sizeof(mvp));
}

static mali_ptr
panfrost_map_constant_buffer_gpu(struct panfrost_batch *batch,
                                 enum pipe_shader_type st,
                                 struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc) {
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      panfrost_bo_access_for_stage(st));

                /* Alignment guaranteed by
                 * PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
1102                 return rsrc->bo->gpu + cb->buffer_offset;
1103         } else if (cb->user_buffer) {
1104                 return panfrost_pool_upload(&batch->pool,
1105                                                  cb->user_buffer +
1106                                                  cb->buffer_offset,
1107                                                  cb->buffer_size);
1108         } else {
1109                 unreachable("No constant buffer");
1110         }
1111 }
1112 
1113 struct sysval_uniform {
1114         union {
1115                 float f[4];
1116                 int32_t i[4];
1117                 uint32_t u[4];
1118                 uint64_t du[2];
1119         };
1120 };
1121 
1122 static void
panfrost_upload_viewport_scale_sysval(struct panfrost_batch * batch,struct sysval_uniform * uniform)1123 panfrost_upload_viewport_scale_sysval(struct panfrost_batch *batch,
1124                                       struct sysval_uniform *uniform)
1125 {
1126         struct panfrost_context *ctx = batch->ctx;
1127         const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1128 
1129         uniform->f[0] = vp->scale[0];
1130         uniform->f[1] = vp->scale[1];
1131         uniform->f[2] = vp->scale[2];
1132 }
1133 
1134 static void
panfrost_upload_viewport_offset_sysval(struct panfrost_batch * batch,struct sysval_uniform * uniform)1135 panfrost_upload_viewport_offset_sysval(struct panfrost_batch *batch,
1136                                        struct sysval_uniform *uniform)
1137 {
1138         struct panfrost_context *ctx = batch->ctx;
1139         const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
1140 
1141         uniform->f[0] = vp->translate[0];
1142         uniform->f[1] = vp->translate[1];
1143         uniform->f[2] = vp->translate[2];
1144 }
1145 
panfrost_upload_txs_sysval(struct panfrost_batch * batch,enum pipe_shader_type st,unsigned int sysvalid,struct sysval_uniform * uniform)1146 static void panfrost_upload_txs_sysval(struct panfrost_batch *batch,
1147                                        enum pipe_shader_type st,
1148                                        unsigned int sysvalid,
1149                                        struct sysval_uniform *uniform)
1150 {
1151         struct panfrost_context *ctx = batch->ctx;
1152         unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
1153         unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
1154         bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
1155         struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
1156 
1157         assert(dim);
1158         uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
1159 
1160         if (dim > 1)
1161                 uniform->i[1] = u_minify(tex->texture->height0,
1162                                          tex->u.tex.first_level);
1163 
1164         if (dim > 2)
1165                 uniform->i[2] = u_minify(tex->texture->depth0,
1166                                          tex->u.tex.first_level);
1167 
1168         if (is_array)
1169                 uniform->i[dim] = tex->texture->array_size;
1170 }
1171 
1172 static void
panfrost_upload_ssbo_sysval(struct panfrost_batch * batch,enum pipe_shader_type st,unsigned ssbo_id,struct sysval_uniform * uniform)1173 panfrost_upload_ssbo_sysval(struct panfrost_batch *batch,
1174                             enum pipe_shader_type st,
1175                             unsigned ssbo_id,
1176                             struct sysval_uniform *uniform)
1177 {
1178         struct panfrost_context *ctx = batch->ctx;
1179 
1180         assert(ctx->ssbo_mask[st] & (1 << ssbo_id));
1181         struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
1182 
1183         /* Compute address */
1184         struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
1185 
1186         panfrost_batch_add_bo(batch, bo,
1187                               PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_RW |
1188                               panfrost_bo_access_for_stage(st));
1189 
1190         /* Upload address and size as sysval */
1191         uniform->du[0] = bo->gpu + sb.buffer_offset;
1192         uniform->u[2] = sb.buffer_size;
1193 }
1194 
1195 static void
panfrost_upload_sampler_sysval(struct panfrost_batch * batch,enum pipe_shader_type st,unsigned samp_idx,struct sysval_uniform * uniform)1196 panfrost_upload_sampler_sysval(struct panfrost_batch *batch,
1197                                enum pipe_shader_type st,
1198                                unsigned samp_idx,
1199                                struct sysval_uniform *uniform)
1200 {
1201         struct panfrost_context *ctx = batch->ctx;
1202         struct pipe_sampler_state *sampl = &ctx->samplers[st][samp_idx]->base;
1203 
1204         uniform->f[0] = sampl->min_lod;
1205         uniform->f[1] = sampl->max_lod;
1206         uniform->f[2] = sampl->lod_bias;
1207 
1208         /* Even without any errata, Midgard represents "no mipmapping" as
1209          * fixing the LOD with the clamps; keep behaviour consistent. c.f.
1210          * panfrost_create_sampler_state which also explains our choice of
1211          * epsilon value (again to keep behaviour consistent) */
1212 
1213         if (sampl->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
1214                 uniform->f[1] = uniform->f[0] + (1.0/256.0);
1215 }

static void
panfrost_upload_num_work_groups_sysval(struct panfrost_batch *batch,
                                       struct sysval_uniform *uniform)
{
        struct panfrost_context *ctx = batch->ctx;

        uniform->u[0] = ctx->compute_grid->grid[0];
        uniform->u[1] = ctx->compute_grid->grid[1];
        uniform->u[2] = ctx->compute_grid->grid[2];
}

static void
panfrost_upload_sysvals(struct panfrost_batch *batch, void *buf,
                        struct panfrost_shader_state *ss,
                        enum pipe_shader_type st)
{
        struct sysval_uniform *uniforms = (void *)buf;

        for (unsigned i = 0; i < ss->sysval_count; ++i) {
                int sysval = ss->sysval[i];

                switch (PAN_SYSVAL_TYPE(sysval)) {
                case PAN_SYSVAL_VIEWPORT_SCALE:
                        panfrost_upload_viewport_scale_sysval(batch,
                                                              &uniforms[i]);
                        break;
                case PAN_SYSVAL_VIEWPORT_OFFSET:
                        panfrost_upload_viewport_offset_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_TEXTURE_SIZE:
                        panfrost_upload_txs_sysval(batch, st,
                                                   PAN_SYSVAL_ID(sysval),
                                                   &uniforms[i]);
                        break;
                case PAN_SYSVAL_SSBO:
                        panfrost_upload_ssbo_sysval(batch, st,
                                                    PAN_SYSVAL_ID(sysval),
                                                    &uniforms[i]);
                        break;
                case PAN_SYSVAL_NUM_WORK_GROUPS:
                        panfrost_upload_num_work_groups_sysval(batch,
                                                               &uniforms[i]);
                        break;
                case PAN_SYSVAL_SAMPLER:
                        panfrost_upload_sampler_sysval(batch, st,
                                                       PAN_SYSVAL_ID(sysval),
                                                       &uniforms[i]);
                        break;
                default:
                        assert(0);
                }
        }
}

static const void *
panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf,
                                 unsigned index)
{
        struct pipe_constant_buffer *cb = &buf->cb[index];
        struct panfrost_resource *rsrc = pan_resource(cb->buffer);

        if (rsrc)
                return rsrc->bo->cpu;
        else if (cb->user_buffer)
                return cb->user_buffer;
        else
                unreachable("No constant buffer");
}

void
panfrost_emit_const_buf(struct panfrost_batch *batch,
                        enum pipe_shader_type stage,
                        struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[stage];

        if (!all)
                return;

        struct panfrost_constant_buffer *buf = &ctx->constant_buffer[stage];

        struct panfrost_shader_state *ss = &all->variants[all->active_variant];

        /* Uniforms are implicitly UBO #0 */
        bool has_uniforms = buf->enabled_mask & (1 << 0);

        /* Allocate room for the sysvals and the uniforms */
        size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
        size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
        size_t size = sys_size + uniform_size;
        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                size);

        /* Upload sysvals requested by the shader */
        panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);

        /* Upload uniforms */
        if (has_uniforms && uniform_size) {
                const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
                memcpy(transfer.cpu + sys_size, cpu, uniform_size);
        }
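
        /* At this point the allocation holds the sysvals at offset 0,
         * followed by the user uniform contents at sys_size; UBO #0 set up
         * below spans the whole allocation */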

        /* Next up, attach UBOs. UBO #0 is the uniforms we just
         * uploaded */

        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
        assert(ubo_count >= 1);

        size_t sz = sizeof(uint64_t) * ubo_count;
        uint64_t ubos[PAN_MAX_CONST_BUFFERS];
        int uniform_count = ss->uniform_count;

        /* Upload uniforms as a UBO */
        ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);

        /* The rest are honest-to-goodness UBOs */

        for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
                size_t usz = buf->cb[ubo].buffer_size;
                bool enabled = buf->enabled_mask & (1 << ubo);
                bool empty = usz == 0;

                if (!enabled || empty) {
                        /* Stub out disabled UBOs to catch accesses */
                        ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
                        continue;
                }

                mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
                                                                buf, ubo);

                unsigned bytes_per_field = 16;
                unsigned aligned = ALIGN_POT(usz, bytes_per_field);
                ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
        }

        mali_ptr ubufs = panfrost_pool_upload(&batch->pool, ubos, sz);
        postfix->uniforms = transfer.gpu;
        postfix->uniform_buffers = ubufs;

        buf->dirty_mask = 0;
}

void
panfrost_emit_shared_memory(struct panfrost_batch *batch,
                            const struct pipe_grid_info *info,
                            struct midgard_payload_vertex_tiler *vtp)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
        struct panfrost_shader_state *ss = &all->variants[all->active_variant];
        unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
                                                           128));
        unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
                               info->grid[2] * 4;
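
        /* Illustrative sizing (numbers invented): ss->shared_size = 200
         * rounds up to single_size = 256, so an 8x4x1 grid reserves
         * 256 * 8 * 4 * 1 * 4 = 32768 bytes */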
        struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
                                                                  shared_size,
                                                                  1);

        struct mali_shared_memory shared = {
                .shared_memory = bo->gpu,
                .shared_workgroup_count =
                        util_logbase2_ceil(info->grid[0]) +
                        util_logbase2_ceil(info->grid[1]) +
                        util_logbase2_ceil(info->grid[2]),
                .shared_unk1 = 0x2,
                .shared_shift = util_logbase2(single_size) - 1
        };

        vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
                                                          sizeof(shared));
}

static mali_ptr
panfrost_get_tex_desc(struct panfrost_batch *batch,
                      enum pipe_shader_type st,
                      struct panfrost_sampler_view *view)
{
        if (!view)
                return (mali_ptr) 0;

        struct pipe_sampler_view *pview = &view->base;
        struct panfrost_resource *rsrc = pan_resource(pview->texture);

        /* Add the BO to the job so it's retained until the job is done. */

        panfrost_batch_add_bo(batch, rsrc->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        panfrost_batch_add_bo(batch, view->bo,
                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                              panfrost_bo_access_for_stage(st));

        return view->bo->gpu;
}

static void
panfrost_update_sampler_view(struct panfrost_sampler_view *view,
                             struct pipe_context *pctx)
{
        struct panfrost_resource *rsrc = pan_resource(view->base.texture);
        if (view->texture_bo != rsrc->bo->gpu ||
            view->layout != rsrc->layout) {
                panfrost_bo_unreference(view->bo);
                panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
        }
}

void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->sampler_view_count[stage])
                return;

        if (device->quirks & IS_BIFROST) {
                struct bifrost_texture_descriptor *descriptors;

                descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
                                     ctx->sampler_view_count[stage]);

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
                        struct pipe_sampler_view *pview = &view->base;
                        struct panfrost_resource *rsrc = pan_resource(pview->texture);
                        panfrost_update_sampler_view(view, &ctx->base);

                        /* Add the BOs to the job so they are retained until the job is done. */

                        panfrost_batch_add_bo(batch, rsrc->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));

                        panfrost_batch_add_bo(batch, view->bo,
                                              PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
                                              panfrost_bo_access_for_stage(stage));

                        memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
                }

                postfix->textures = panfrost_pool_upload(&batch->pool,
                                                         descriptors,
                                                         sizeof(struct bifrost_texture_descriptor) *
                                                         ctx->sampler_view_count[stage]);

                free(descriptors);
        } else {
                uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];

                for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
                        struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];

                        panfrost_update_sampler_view(view, &ctx->base);

                        trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
                }

                postfix->textures = panfrost_pool_upload(&batch->pool,
                                                         trampolines,
                                                         sizeof(uint64_t) *
                                                         ctx->sampler_view_count[stage]);
        }
}

void
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
                                  enum pipe_shader_type stage,
                                  struct mali_vertex_tiler_postfix *postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);

        if (!ctx->sampler_count[stage])
                return;

        if (device->quirks & IS_BIFROST) {
                size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
                size_t transfer_size = desc_size * ctx->sampler_count[stage];
                struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                        transfer_size);
                struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;

                for (int i = 0; i < ctx->sampler_count[stage]; ++i)
                        desc[i] = ctx->samplers[stage][i]->bifrost_hw;

                postfix->sampler_descriptor = transfer.gpu;
        } else {
                size_t desc_size = sizeof(struct mali_sampler_descriptor);
                size_t transfer_size = desc_size * ctx->sampler_count[stage];
                struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                        transfer_size);
                struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;

                for (int i = 0; i < ctx->sampler_count[stage]; ++i)
                        desc[i] = ctx->samplers[stage][i]->midgard_hw;

                postfix->sampler_descriptor = transfer.gpu;
        }
}

void
panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
                               struct mali_vertex_tiler_postfix *vertex_postfix)
{
        struct panfrost_context *ctx = batch->ctx;

        if (!ctx->vertex)
                return;

        struct panfrost_vertex_state *so = ctx->vertex;

        panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
        vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
                                                              sizeof(*so->hw) *
                                                              PAN_MAX_ATTRIBUTE);
}

void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
                          struct mali_vertex_tiler_postfix *vertex_postfix)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_vertex_state *so = ctx->vertex;

        /* Staged mali_attr, and index into them. i =/= k, depending on the
         * vertex buffer mask and instancing. Twice as much room is allocated,
         * for a worst case of NPOT_DIVIDEs which take up an extra slot */
        union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
        unsigned k = 0;

        for (unsigned i = 0; i < so->num_elements; ++i) {
                /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
                 * means duplicating some vertex buffers (who cares? aside from
                 * maybe some caching implications but I somehow doubt that
                 * matters) */

                struct pipe_vertex_element *elem = &so->pipe[i];
                unsigned vbi = elem->vertex_buffer_index;

                /* The exception to the 1:1 mapping is that we can have
                 * multiple entries (NPOT divisors), so we fix up anyway */

                so->hw[i].index = k;

                if (!(ctx->vb_mask & (1 << vbi)))
                        continue;

                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                struct panfrost_resource *rsrc;

                rsrc = pan_resource(buf->buffer.resource);
                if (!rsrc)
                        continue;

                /* Align to 64 bytes by masking off the lower bits. This
                 * will be adjusted back when we fix up the src_offset in
                 * mali_attr_meta */

                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
                mali_ptr addr = raw_addr & ~63;
                unsigned chopped_addr = raw_addr - addr;
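
                /* Worked example (addresses invented): raw_addr = 0x10070
                 * masks down to addr = 0x10040, so chopped_addr = 0x30; that
                 * 0x30 is added back to the size below so nothing is clamped
                 * away */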

                /* Add a dependency of the batch on the vertex buffer */
                panfrost_batch_add_bo(batch, rsrc->bo,
                                      PAN_BO_ACCESS_SHARED |
                                      PAN_BO_ACCESS_READ |
                                      PAN_BO_ACCESS_VERTEX_TILER);

                /* Set common fields */
                attrs[k].elements = addr;
                attrs[k].stride = buf->stride;

                /* Since we advanced the base pointer, we shrink the buffer
                 * size */
                attrs[k].size = rsrc->base.width0 - buf->buffer_offset;

                /* We need to add the extra size we masked off (for
                 * correctness) so the data doesn't get clamped away */
                attrs[k].size += chopped_addr;

                /* For non-instancing make sure we initialize */
                attrs[k].shift = attrs[k].extra_flags = 0;

                /* Instancing uses a dramatically different code path than
                 * linear, so dispatch for the actual emission now that the
                 * common code is finished */

                unsigned divisor = elem->instance_divisor;

                if (divisor && ctx->instance_count == 1) {
                        /* Silly corner case where there's a divisor(=1) but
                         * there's no legitimate instancing. So we want *every*
                         * attribute to be the same. So set stride to zero so
                         * we don't go anywhere. */

                        attrs[k].size = attrs[k].stride + chopped_addr;
                        attrs[k].stride = 0;
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else if (ctx->instance_count <= 1) {
                        /* Normal, non-instanced attributes */
                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                } else {
                        unsigned instance_shift = vertex_postfix->instance_shift;
                        unsigned instance_odd = vertex_postfix->instance_odd;

                        k += panfrost_vertex_instanced(ctx->padded_count,
                                                       instance_shift,
                                                       instance_odd,
                                                       divisor, &attrs[k]);
                }
        }

        /* Add special gl_VertexID/gl_InstanceID buffers */

        panfrost_vertex_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_VERTEX_ID].index = k++;
        panfrost_instance_id(ctx->padded_count, &attrs[k]);
        so->hw[PAN_INSTANCE_ID].index = k++;

        /* Upload whatever we emitted and go */

        vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
                                                          k * sizeof(*attrs));
}

static mali_ptr
panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
                       unsigned stride, unsigned count)
{
        /* Fill out the descriptor */
        slot->stride = stride;
        slot->size = stride * count;
        slot->shift = slot->extra_flags = 0;

        struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
                                                                slot->size);

        slot->elements = transfer.gpu | MALI_ATTR_LINEAR;

        return transfer.gpu;
}

static unsigned
panfrost_streamout_offset(unsigned stride, unsigned offset,
                          struct pipe_stream_output_target *target)
{
        return (target->buffer_offset + (offset * stride * 4)) & 63;
}
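
/* For example (numbers invented): with buffer_offset = 100, a stride of
 * 4 dwords and 3 vertices already written, the raw byte offset is
 * 100 + 3 * 4 * 4 = 148, and 148 & 63 = 20 is the misalignment that must
 * be compensated for after the 64-byte alignment applied below */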

static void
panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
                        unsigned stride, unsigned offset, unsigned count,
                        struct pipe_stream_output_target *target)
{
        /* Fill out the descriptor */
        slot->stride = stride * 4;
        slot->shift = slot->extra_flags = 0;

        unsigned max_size = target->buffer_size;
        unsigned expected_size = slot->stride * count;

        /* Grab the BO and bind it to the batch */
        struct panfrost_bo *bo = pan_resource(target->buffer)->bo;

        /* Varyings are WRITE from the perspective of the VERTEX but READ from
         * the perspective of the TILER and FRAGMENT.
         */
        panfrost_batch_add_bo(batch, bo,
                              PAN_BO_ACCESS_SHARED |
                              PAN_BO_ACCESS_RW |
                              PAN_BO_ACCESS_VERTEX_TILER |
                              PAN_BO_ACCESS_FRAGMENT);

        /* We will have an offset applied to get alignment */
        mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
        slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
        slot->size = MIN2(max_size, expected_size) + (addr & 63);
}

static bool
has_point_coord(unsigned mask, gl_varying_slot loc)
{
        if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
                return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
        else if (loc == VARYING_SLOT_PNTC)
                return (mask & (1 << 8));
        else
                return false;
}

/* Helpers for manipulating stream out information so we can pack varyings
 * accordingly. Compute the src_offset for a given captured varying */

static struct pipe_stream_output *
pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
{
        for (unsigned i = 0; i < info->num_outputs; ++i) {
                if (info->output[i].register_index == loc)
                        return &info->output[i];
        }

        unreachable("Varying not captured");
}

static unsigned
pan_varying_size(enum mali_format fmt)
{
        unsigned type = MALI_EXTRACT_TYPE(fmt);
        unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
        unsigned bits = MALI_EXTRACT_BITS(fmt);
        unsigned bpc = 0;

        if (bits == MALI_CHANNEL_FLOAT) {
                /* No doubles */
                bool fp16 = (type == MALI_FORMAT_SINT);
                assert(fp16 || (type == MALI_FORMAT_UNORM));

                bpc = fp16 ? 2 : 4;
        } else {
                assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);

                /* See the enums */
                bits = 1 << bits;
                assert(bits >= 8);
                bpc = bits / 8;
        }

        return bpc * chan;
}
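
/* For instance, a 4-channel fp32 format works out to 4 * 4 = 16 bytes per
 * vertex, while a 2-channel fp16 format is 2 * 2 = 4 bytes (illustrative
 * arithmetic following the helper above) */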

/* Indices for named (non-XFB) varyings that are present. These are packed
 * tightly so they correspond to a bitfield present (P) indexed by (1 <<
 * PAN_VARY_*). This has the nice property that you can look up the buffer
 * index of a given special field given a shift S by:
 *
 *      idx = popcount(P & ((1 << S) - 1))
 *
 * That is: count the varyings that come earlier, and that count is this
 * varying's index. Likewise, the total number of special buffers required is
 * simply popcount(P)
 */
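
/* Worked example: if P has GENERAL, POSITION and PSIZ set (P = 0b111), the
 * buffer index of PSIZ (S = 2) is popcount(0b111 & 0b011) = 2, and
 * popcount(P) = 3 buffers are required in total */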

enum pan_special_varying {
        PAN_VARY_GENERAL = 0,
        PAN_VARY_POSITION = 1,
        PAN_VARY_PSIZ = 2,
        PAN_VARY_PNTCOORD = 3,
        PAN_VARY_FACE = 4,
        PAN_VARY_FRAGCOORD = 5,

        /* Keep last */
        PAN_VARY_MAX,
};

/* Given a varying, figure out which index it corresponds to */

static inline unsigned
pan_varying_index(unsigned present, enum pan_special_varying v)
{
        unsigned mask = (1 << v) - 1;
        return util_bitcount(present & mask);
}

/* Get the base offset for XFB buffers, which by convention come after
 * everything else. Wrapper function for semantic reasons; by construction this
 * is just popcount. */

static inline unsigned
pan_xfb_base(unsigned present)
{
        return util_bitcount(present);
}

/* Computes the present mask for varyings so we can start emitting varying records */

static inline unsigned
pan_varying_present(
        struct panfrost_shader_state *vs,
        struct panfrost_shader_state *fs,
        unsigned quirks)
{
        /* At the moment we always emit general and position buffers. Not
         * strictly necessary but usually harmless */

        unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);

        /* Enable special buffers based on the shader info */

        if (vs->writes_point_size)
                present |= (1 << PAN_VARY_PSIZ);

        if (fs->reads_point_coord)
                present |= (1 << PAN_VARY_PNTCOORD);

        if (fs->reads_face)
                present |= (1 << PAN_VARY_FACE);

        if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
                present |= (1 << PAN_VARY_FRAGCOORD);

        /* Also, if we have a point sprite, we need a point coord buffer */

        for (unsigned i = 0; i < fs->varying_count; i++)  {
                gl_varying_slot loc = fs->varyings_loc[i];

                if (has_point_coord(fs->point_sprite_mask, loc))
                        present |= (1 << PAN_VARY_PNTCOORD);
        }

        return present;
}
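
/* For example, a pipeline where the VS writes gl_PointSize and the FS reads
 * gl_PointCoord yields present = GENERAL | POSITION | PSIZ | PNTCOORD
 * (0b1111), so pan_xfb_base(present) = 4 and any XFB buffers start at
 * index 4 (illustrative case) */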

/* Emitters for varying records */

static struct mali_attr_meta
pan_emit_vary(unsigned present, enum pan_special_varying buf,
              unsigned quirks, enum mali_format format,
              unsigned offset)
{
        unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);

        struct mali_attr_meta meta = {
                .index = pan_varying_index(present, buf),
                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
                .swizzle = quirks & HAS_SWIZZLES ?
                        panfrost_get_default_swizzle(nr_channels) :
                        panfrost_bifrost_swizzle(nr_channels),
                .format = format,
                .src_offset = offset
        };

        return meta;
}

/* General varying that is unused */

static struct mali_attr_meta
pan_emit_vary_only(unsigned present, unsigned quirks)
{
        return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
}

/* Special records */

static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
        [PAN_VARY_POSITION]     = MALI_VARYING_POS,
        [PAN_VARY_PSIZ]         = MALI_R16F,
        [PAN_VARY_PNTCOORD]     = MALI_R16F,
        [PAN_VARY_FACE]         = MALI_R32I,
        [PAN_VARY_FRAGCOORD]    = MALI_RGBA32F
};

static struct mali_attr_meta
pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
                      unsigned quirks)
{
        assert(buf < PAN_VARY_MAX);
        return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
}

static enum mali_format
pan_xfb_format(enum mali_format format, unsigned nr)
{
        if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
                return MALI_R32F | MALI_NR_CHANNELS(nr);
        else
                return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
}
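
/* e.g. an fp16 vec3 captured to XFB is widened here to a 3-channel fp32
 * format (MALI_R32F | MALI_NR_CHANNELS(3)), matching the 32 bits per
 * component that transform feedback captures (illustrative reading of the
 * helper above) */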

/* Transform feedback records. Note struct pipe_stream_output is (if packed as
 * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
 * value. */

static struct mali_attr_meta
pan_emit_vary_xfb(unsigned present,
                  unsigned max_xfb,
                  unsigned *streamout_offsets,
                  unsigned quirks,
                  enum mali_format format,
                  struct pipe_stream_output o)
{
        /* Construct a record pointing into the XFB buffer */
        struct mali_attr_meta meta = {
                /* XFB buffers come after everything else */
                .index = pan_xfb_base(present) + o.output_buffer,

                /* As usual unknown bit */
                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,

                /* Override swizzle with number of channels */
                .swizzle = quirks & HAS_SWIZZLES ?
                        panfrost_get_default_swizzle(o.num_components) :
                        panfrost_bifrost_swizzle(o.num_components),

                /* Override number of channels and precision to highp */
                .format = pan_xfb_format(format, o.num_components),

                /* Apply given offsets together */
                .src_offset = (o.dst_offset * 4) /* dwords */
                        + streamout_offsets[o.output_buffer]
        };

        return meta;
}

/* Determine if we should capture a varying for XFB. This requires actually
 * having a buffer for it. If we don't capture it, we'll fall back to a general
 * varying path (linked or unlinked, possibly discarding the write) */

static bool
panfrost_xfb_captured(struct panfrost_shader_state *xfb,
                      unsigned loc, unsigned max_xfb)
{
        if (!(xfb->so_mask & (1ll << loc)))
                return false;

        struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
        return o->output_buffer < max_xfb;
}

/* Higher-level wrapper around all of the above, classifying a varying into one
 * of the above types */

static struct mali_attr_meta
panfrost_emit_varying(
                struct panfrost_shader_state *stage,
                struct panfrost_shader_state *other,
                struct panfrost_shader_state *xfb,
                unsigned present,
                unsigned max_xfb,
                unsigned *streamout_offsets,
                unsigned quirks,
                unsigned *gen_offsets,
                enum mali_format *gen_formats,
                unsigned *gen_stride,
                unsigned idx,
                bool should_alloc,
                bool is_fragment)
{
        gl_varying_slot loc = stage->varyings_loc[idx];
        enum mali_format format = stage->varyings[idx];

        /* Override format to match linkage */
        if (!should_alloc && gen_formats[idx])
                format = gen_formats[idx];

        if (has_point_coord(stage->point_sprite_mask, loc)) {
                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
        } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
                struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
                return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
        } else if (loc == VARYING_SLOT_POS) {
                if (is_fragment)
                        return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
                else
                        return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
        } else if (loc == VARYING_SLOT_PSIZ) {
                return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
        } else if (loc == VARYING_SLOT_PNTC) {
                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
        } else if (loc == VARYING_SLOT_FACE) {
                return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
        }

        /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
        signed other_idx = -1;

        for (unsigned j = 0; j < other->varying_count; ++j) {
                if (other->varyings_loc[j] == loc) {
                        other_idx = j;
                        break;
                }
        }

        if (other_idx < 0)
                return pan_emit_vary_only(present, quirks);

        unsigned offset = gen_offsets[other_idx];

        if (should_alloc) {
                /* We're linked, so allocate a space via a watermark allocation */
                enum mali_format alt = other->varyings[other_idx];

                /* Do interpolation at minimum precision */
                unsigned size_main = pan_varying_size(format);
                unsigned size_alt = pan_varying_size(alt);
                unsigned size = MIN2(size_main, size_alt);

                /* If a varying is marked for XFB but not actually captured, we
                 * should match the format to the format that would otherwise
                 * be used for XFB, since dEQP checks for invariance here. It's
                 * unclear if this is required by the spec. */

                if (xfb->so_mask & (1ull << loc)) {
                        struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
                        format = pan_xfb_format(format, o->num_components);
                        size = pan_varying_size(format);
                } else if (size == size_alt) {
                        format = alt;
                }

                gen_offsets[idx] = *gen_stride;
                gen_formats[other_idx] = format;
                offset = *gen_stride;
                *gen_stride += size;
        }

        return pan_emit_vary(present, PAN_VARY_GENERAL,
                             quirks, format, offset);
}

static void
pan_emit_special_input(union mali_attr *varyings,
                       unsigned present,
                       enum pan_special_varying v,
                       mali_ptr addr)
{
        if (present & (1 << v)) {
                /* Ensure we write exactly once for performance and with fields
                 * zeroed appropriately to avoid flakes */

                union mali_attr s = {
                        .elements = addr
                };

                varyings[pan_varying_index(present, v)] = s;
        }
}

void
panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
                                 unsigned vertex_count,
                                 struct mali_vertex_tiler_postfix *vertex_postfix,
                                 struct mali_vertex_tiler_postfix *tiler_postfix,
                                 union midgard_primitive_size *primitive_size)
{
        /* Load the shaders */
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_shader_state *vs, *fs;
        size_t vs_size, fs_size;

        /* Allocate the varying descriptor */

        vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
        fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
        vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
        fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;

        struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
                                                             vs_size +
                                                             fs_size);

        struct pipe_stream_output_info *so = &vs->stream_output;
        unsigned present = pan_varying_present(vs, fs, dev->quirks);

        /* Check if this varying is linked by us. This is the case for
         * general-purpose, non-captured varyings. If it is, link it. If it's
         * not, use the provided stream out information to determine the
         * offset, since it was already linked for us. */

        unsigned gen_offsets[32];
        enum mali_format gen_formats[32];
        memset(gen_offsets, 0, sizeof(gen_offsets));
        memset(gen_formats, 0, sizeof(gen_formats));

        unsigned gen_stride = 0;
        assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
        assert(fs->varying_count < ARRAY_SIZE(gen_offsets));

        unsigned streamout_offsets[32];

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                streamout_offsets[i] = panfrost_streamout_offset(
                                        so->stride[i],
                                        ctx->streamout.offsets[i],
                                        ctx->streamout.targets[i]);
        }

        struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
        struct mali_attr_meta *ofs = ovs + vs->varying_count;

        for (unsigned i = 0; i < vs->varying_count; i++) {
                ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
                                ctx->streamout.num_targets, streamout_offsets,
                                dev->quirks,
                                gen_offsets, gen_formats, &gen_stride, i, true, false);
        }

        for (unsigned i = 0; i < fs->varying_count; i++) {
                ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
                                ctx->streamout.num_targets, streamout_offsets,
                                dev->quirks,
                                gen_offsets, gen_formats, &gen_stride, i, false, true);
        }

        unsigned xfb_base = pan_xfb_base(present);
        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
                        sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
        union mali_attr *varyings = (union mali_attr *) T.cpu;

        /* Emit the stream out buffers */

        unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
                                                           ctx->vertex_count);

        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                panfrost_emit_streamout(batch, &varyings[xfb_base + i],
                                        so->stride[i],
                                        ctx->streamout.offsets[i],
                                        out_count,
                                        ctx->streamout.targets[i]);
        }

        panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
                        gen_stride, vertex_count);

        /* fp32 vec4 gl_Position */
        tiler_postfix->position_varying = panfrost_emit_varyings(batch,
                        &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
                        sizeof(float) * 4, vertex_count);

        if (present & (1 << PAN_VARY_PSIZ)) {
                primitive_size->pointer = panfrost_emit_varyings(batch,
                                &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
                                2, vertex_count);
        }

        pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
        pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
        pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);

        vertex_postfix->varyings = T.gpu;
        tiler_postfix->varyings = T.gpu;

        vertex_postfix->varying_meta = trans.gpu;
        tiler_postfix->varying_meta = trans.gpu + vs_size;
}

void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
                                struct mali_vertex_tiler_prefix *vertex_prefix,
                                struct mali_vertex_tiler_postfix *vertex_postfix,
                                struct mali_vertex_tiler_prefix *tiler_prefix,
                                struct mali_vertex_tiler_postfix *tiler_postfix,
                                union midgard_primitive_size *primitive_size)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *device = pan_device(ctx->base.screen);
        bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
        struct bifrost_payload_vertex bifrost_vertex = {0,};
        struct bifrost_payload_tiler bifrost_tiler = {0,};
        struct midgard_payload_vertex_tiler midgard_vertex = {0,};
        struct midgard_payload_vertex_tiler midgard_tiler = {0,};
        void *vp, *tp;
        size_t vp_size, tp_size;

        if (device->quirks & IS_BIFROST) {
                bifrost_vertex.prefix = *vertex_prefix;
                bifrost_vertex.postfix = *vertex_postfix;
                vp = &bifrost_vertex;
                vp_size = sizeof(bifrost_vertex);

                bifrost_tiler.prefix = *tiler_prefix;
                bifrost_tiler.tiler.primitive_size = *primitive_size;
                bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
                bifrost_tiler.postfix = *tiler_postfix;
                tp = &bifrost_tiler;
                tp_size = sizeof(bifrost_tiler);
        } else {
                midgard_vertex.prefix = *vertex_prefix;
                midgard_vertex.postfix = *vertex_postfix;
                vp = &midgard_vertex;
                vp_size = sizeof(midgard_vertex);

                midgard_tiler.prefix = *tiler_prefix;
                midgard_tiler.postfix = *tiler_postfix;
                midgard_tiler.primitive_size = *primitive_size;
                tp = &midgard_tiler;
                tp_size = sizeof(midgard_tiler);
        }

        if (wallpapering) {
                /* Inject in reverse order, with "predicted" job indices.
                 * THIS IS A HACK XXX */
                panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false,
                                 batch->scoreboard.job_index + 2, tp, tp_size, true);
                panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_VERTEX, false, 0,
                                 vp, vp_size, true);
                return;
        }

        /* If rasterizer discard is enabled, only submit the vertex job */

        bool rasterizer_discard = ctx->rasterizer &&
                                  ctx->rasterizer->base.rasterizer_discard;

        unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_VERTEX, false, 0,
                                           vp, vp_size, false);

        if (rasterizer_discard)
                return;

        panfrost_new_job(&batch->pool, &batch->scoreboard, JOB_TYPE_TILER, false, vertex, tp, tp_size,
                         false);
}

/* TODO: stop hardcoding this */
mali_ptr
panfrost_emit_sample_locations(struct panfrost_batch *batch)
{
        uint16_t locations[] = {
            128, 128,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            0, 256,
            128, 128,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
            0, 0,
        };

        return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
}