/*
 * Copyright © 2021 Collabora Ltd.
 *
 * Derived from tu_shader.c which is:
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "genxml/gen_macros.h"

#include "panvk_private.h"

#include "nir_builder.h"
#include "nir_lower_blend.h"
#include "nir_conversion_builder.h"
#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "panfrost-quirks.h"
#include "pan_shader.h"
#include "util/pan_lower_framebuffer.h"

#include "vk_util.h"

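/* Translate a SPIR-V binary into NIR, applying the specialization constants
 * from the pipeline stage info, then lower the sysvals that are passed as
 * varyings on pre-v6 (Midgard) hardware.
 */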
static nir_shader *
panvk_spirv_to_nir(const void *code,
                   size_t codesize,
                   gl_shader_stage stage,
                   const char *entry_point_name,
                   const VkSpecializationInfo *spec_info,
                   const nir_shader_compiler_options *nir_options)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .caps = { false },
      .ubo_addr_format = nir_address_format_32bit_index_offset,
      .ssbo_addr_format = nir_address_format_32bit_index_offset,
   };

   /* convert VkSpecializationInfo */
   uint32_t num_spec = 0;
   struct nir_spirv_specialization *spec =
      vk_spec_info_to_nir_spirv(spec_info, &num_spec);

   nir_shader *nir = spirv_to_nir(code, codesize / sizeof(uint32_t), spec,
                                  num_spec, stage, entry_point_name,
                                  &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
      .frag_coord = PAN_ARCH <= 5,
      .point_coord = PAN_ARCH <= 5,
      .front_face = PAN_ARCH <= 5,
   };
   NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);

   return nir;
}

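/* Context passed to the panvk_lower_misc() instruction callbacks: the shader
 * being lowered and the pipeline layout used to remap descriptor indices.
 */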
struct panvk_lower_misc_ctx {
   struct panvk_shader *shader;
   const struct panvk_pipeline_layout *layout;
};

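/* Map a (set, binding) sampler deref to a flat index in the global sampler
 * table built from the pipeline layout.
 */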
static unsigned
get_fixed_sampler_index(nir_deref_instr *deref,
                        const struct panvk_lower_misc_ctx *ctx)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);
   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &ctx->layout->sets[set].layout->bindings[binding];

   return bind_layout->sampler_idx + ctx->layout->sets[set].sampler_offset;
}

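/* Same as get_fixed_sampler_index(), but for the texture table. */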
static unsigned
get_fixed_texture_index(nir_deref_instr *deref,
                        const struct panvk_lower_misc_ctx *ctx)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);
   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   const struct panvk_descriptor_set_binding_layout *bind_layout =
      &ctx->layout->sets[set].layout->bindings[binding];

   return bind_layout->tex_idx + ctx->layout->sets[set].tex_offset;
}

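/* Replace texture/sampler derefs on a tex instruction by the fixed indices
 * they map to in the pipeline layout, and drop the deref sources.
 */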
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
          const struct panvk_lower_misc_ctx *ctx)
{
   bool progress = false;
   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);

   b->cursor = nir_before_instr(&tex->instr);

   if (sampler_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
      tex->sampler_index = get_fixed_sampler_index(deref, ctx);
      nir_tex_instr_remove_src(tex, sampler_src_idx);
      progress = true;
   }

   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   if (tex_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
      tex->texture_index = get_fixed_texture_index(deref, ctx);
      nir_tex_instr_remove_src(tex, tex_src_idx);
      progress = true;
   }

   return progress;
}

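/* Turn vulkan_resource_index intrinsics into plain UBO/SSBO indices by
 * adding the base index the (set, binding) pair maps to in the pipeline
 * layout.
 */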
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *intr,
                            const struct panvk_lower_misc_ctx *ctx)
{
   nir_ssa_def *vulkan_idx = intr->src[0].ssa;

   unsigned set = nir_intrinsic_desc_set(intr);
   unsigned binding = nir_intrinsic_binding(intr);
   struct panvk_descriptor_set_layout *set_layout = ctx->layout->sets[set].layout;
   struct panvk_descriptor_set_binding_layout *binding_layout =
      &set_layout->bindings[binding];
   unsigned base;

   switch (binding_layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      base = binding_layout->ubo_idx + ctx->layout->sets[set].ubo_offset;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      base = binding_layout->ssbo_idx + ctx->layout->sets[set].ssbo_offset;
      break;
   default:
      unreachable("Invalid descriptor type");
      break;
   }

   b->cursor = nir_before_instr(&intr->instr);
   nir_ssa_def *idx = nir_iadd(b, nir_imm_int(b, base), vulkan_idx);
   nir_ssa_def_rewrite_uses(&intr->dest.ssa, idx);
   nir_instr_remove(&intr->instr);
}

static void
lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin)
{
   /* Loading the descriptor happens as part of the load/store instruction,
    * so this is mostly a no-op: we just wrap the index in the
    * (index, offset) vec2 expected by nir_address_format_32bit_index_offset.
    */
   b->cursor = nir_before_instr(&intrin->instr);
   nir_ssa_def *val = nir_vec2(b, intrin->src[0].ssa, nir_imm_int(b, 0));
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
   nir_instr_remove(&intrin->instr);
}

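/* Dispatch the descriptor-related intrinsics to their lowering helpers. */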
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
                const struct panvk_lower_misc_ctx *ctx)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, intr, ctx);
      return true;
   case nir_intrinsic_load_vulkan_descriptor:
      lower_load_vulkan_descriptor(b, intr);
      return true;
   default:
      return false;
   }
}

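/* Per-instruction callback for panvk_lower_misc(). */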
static bool
panvk_lower_misc_instr(nir_builder *b,
                       nir_instr *instr,
                       void *data)
{
   const struct panvk_lower_misc_ctx *ctx = data;

   switch (instr->type) {
   case nir_instr_type_tex:
      return lower_tex(b, nir_instr_as_tex(instr), ctx);
   case nir_instr_type_intrinsic:
      return lower_intrinsic(b, nir_instr_as_intrinsic(instr), ctx);
   default:
      return false;
   }
}

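/* Lower descriptor accesses (textures, samplers, UBOs/SSBOs) to the flat
 * index space expected by the Panfrost backend.
 */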
static bool
panvk_lower_misc(nir_shader *nir, const struct panvk_lower_misc_ctx *ctx)
{
   return nir_shader_instructions_pass(nir, panvk_lower_misc_instr,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance,
                                       (void *)ctx);
}

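/* Replace load_blend_const_color_rgba intrinsics by immediates when the
 * blend constants are static.
 */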
static bool
panvk_inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_load_blend_const_color_rgba)
      return false;

   const nir_const_value *constants = data;

   b->cursor = nir_after_instr(instr);
   nir_ssa_def *constant = nir_build_imm(b, 4, 32, constants);
   nir_ssa_def_rewrite_uses(&intr->dest.ssa, constant);
   nir_instr_remove(instr);
   return true;
}

#if PAN_ARCH <= 5
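/* Per-render-target type conversion inserted when the format is not natively
 * blendable: the replacement output variable and the old/new ALU types.
 */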
struct panvk_lower_blend_type_conv {
   nir_variable *var;
   nir_alu_type newtype;
   nir_alu_type oldtype;
};

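/* Retype fragment outputs so they match the type the blend lowering expects
 * for the render-target format, redirecting loads/stores to a replacement
 * variable with a conversion in between.
 */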
static bool
panvk_adjust_rt_type(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_store_deref &&
       intr->intrinsic != nir_intrinsic_load_deref)
      return false;

   nir_variable *var = nir_intrinsic_get_var(intr, 0);
   if (var->data.mode != nir_var_shader_out ||
       (var->data.location != FRAG_RESULT_COLOR &&
        var->data.location < FRAG_RESULT_DATA0))
      return false;

   /* Determine render target for per-RT blending */
   unsigned rt =
      (var->data.location == FRAG_RESULT_COLOR) ? 0 :
      (var->data.location - FRAG_RESULT_DATA0);

   const struct panvk_lower_blend_type_conv *typeconv = data;
   nir_alu_type newtype = typeconv[rt].newtype;
   nir_alu_type oldtype = typeconv[rt].oldtype;

   /* No conversion */
   if (newtype == nir_type_invalid || newtype == oldtype)
      return false;

   b->cursor = nir_before_instr(instr);

   nir_deref_instr *deref = nir_build_deref_var(b, typeconv[rt].var);
   nir_instr_rewrite_src(&intr->instr, &intr->src[0],
                         nir_src_for_ssa(&deref->dest.ssa));

   if (intr->intrinsic == nir_intrinsic_store_deref) {
      nir_ssa_def *val = nir_ssa_for_src(b, intr->src[1], 4);
      bool clamp = nir_alu_type_get_base_type(newtype) != nir_type_float;
      val = nir_convert_with_rounding(b, val, oldtype, newtype,
                                      nir_rounding_mode_undef, clamp);
      nir_store_var(b, typeconv[rt].var, val, nir_intrinsic_write_mask(intr));
   } else {
      bool clamp = nir_alu_type_get_base_type(oldtype) != nir_type_float;
      nir_ssa_def *val = nir_load_var(b, typeconv[rt].var);
      val = nir_convert_with_rounding(b, val, newtype, oldtype,
                                      nir_rounding_mode_undef, clamp);
      nir_ssa_def_rewrite_uses(&intr->dest.ssa, val);
   }

   nir_instr_remove(instr);

   return true;
}
#endif

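/* Lower blending in the fragment shader when the fixed-function blend unit
 * can't handle the equation/format, then rewrite the hardware equation to a
 * plain color replacement.
 */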
static void
panvk_lower_blend(struct panfrost_device *pdev,
                  nir_shader *nir,
                  struct panfrost_compile_inputs *inputs,
                  struct pan_blend_state *blend_state,
                  bool static_blend_constants)
{
   nir_lower_blend_options options = {
      .logicop_enable = blend_state->logicop_enable,
      .logicop_func = blend_state->logicop_func,
   };

#if PAN_ARCH <= 5
   struct panvk_lower_blend_type_conv typeconv[8] = { 0 };
#endif
   bool lower_blend = false;

   for (unsigned rt = 0; rt < blend_state->rt_count; rt++) {
      struct pan_blend_rt_state *rt_state = &blend_state->rts[rt];

      if (!panvk_per_arch(blend_needs_lowering)(pdev, blend_state, rt))
         continue;

      enum pipe_format fmt = rt_state->format;

      options.format[rt] = fmt;
      options.rt[rt].colormask = rt_state->equation.color_mask;

      if (!rt_state->equation.blend_enable) {
         static const nir_lower_blend_channel replace = {
            .func = BLEND_FUNC_ADD,
            .src_factor = BLEND_FACTOR_ZERO,
            .invert_src_factor = true,
            .dst_factor = BLEND_FACTOR_ZERO,
            .invert_dst_factor = false,
         };

         options.rt[rt].rgb = replace;
         options.rt[rt].alpha = replace;
      } else {
         options.rt[rt].rgb.func = rt_state->equation.rgb_func;
         options.rt[rt].rgb.src_factor = rt_state->equation.rgb_src_factor;
         options.rt[rt].rgb.invert_src_factor = rt_state->equation.rgb_invert_src_factor;
         options.rt[rt].rgb.dst_factor = rt_state->equation.rgb_dst_factor;
         options.rt[rt].rgb.invert_dst_factor = rt_state->equation.rgb_invert_dst_factor;
         options.rt[rt].alpha.func = rt_state->equation.alpha_func;
         options.rt[rt].alpha.src_factor = rt_state->equation.alpha_src_factor;
         options.rt[rt].alpha.invert_src_factor = rt_state->equation.alpha_invert_src_factor;
         options.rt[rt].alpha.dst_factor = rt_state->equation.alpha_dst_factor;
         options.rt[rt].alpha.invert_dst_factor = rt_state->equation.alpha_invert_dst_factor;
      }

      /* Update the equation to force a color replacement */
      rt_state->equation.color_mask = 0xf;
      rt_state->equation.rgb_func = BLEND_FUNC_ADD;
      rt_state->equation.rgb_src_factor = BLEND_FACTOR_ZERO;
      rt_state->equation.rgb_invert_src_factor = true;
      rt_state->equation.rgb_dst_factor = BLEND_FACTOR_ZERO;
      rt_state->equation.rgb_invert_dst_factor = false;
      rt_state->equation.alpha_func = BLEND_FUNC_ADD;
      rt_state->equation.alpha_src_factor = BLEND_FACTOR_ZERO;
      rt_state->equation.alpha_invert_src_factor = true;
      rt_state->equation.alpha_dst_factor = BLEND_FACTOR_ZERO;
      rt_state->equation.alpha_invert_dst_factor = false;
      lower_blend = true;

#if PAN_ARCH >= 6
      inputs->bifrost.static_rt_conv = true;
      inputs->bifrost.rt_conv[rt] =
         GENX(pan_blend_get_internal_desc)(pdev, fmt, rt, 32, false) >> 32;
#else
      if (!panfrost_blendable_formats_v6[fmt].internal) {
         nir_variable *outvar =
            nir_find_variable_with_location(nir, nir_var_shader_out, FRAG_RESULT_DATA0 + rt);
         if (!outvar && !rt)
            outvar = nir_find_variable_with_location(nir, nir_var_shader_out, FRAG_RESULT_COLOR);

         assert(outvar);

         const struct util_format_description *format_desc =
            util_format_description(fmt);

         typeconv[rt].newtype = pan_unpacked_type_for_format(format_desc);
         typeconv[rt].oldtype = nir_get_nir_type_for_glsl_type(outvar->type);
         typeconv[rt].var =
            nir_variable_create(nir, nir_var_shader_out,
                                glsl_vector_type(nir_get_glsl_base_type_for_nir_type(typeconv[rt].newtype),
                                                 glsl_get_vector_elements(outvar->type)),
                                outvar->name);
         typeconv[rt].var->data.location = outvar->data.location;
         inputs->blend.nr_samples = rt_state->nr_samples;
         inputs->rt_formats[rt] = rt_state->format;
      }
#endif
   }

   if (lower_blend) {
#if PAN_ARCH <= 5
      NIR_PASS_V(nir, nir_shader_instructions_pass,
                 panvk_adjust_rt_type,
                 nir_metadata_block_index |
                 nir_metadata_dominance,
                 &typeconv);
      nir_remove_dead_derefs(nir);
      nir_remove_dead_variables(nir, nir_var_shader_out, NULL);
#endif

      NIR_PASS_V(nir, nir_lower_blend, options);

      if (static_blend_constants) {
         const nir_const_value constants[4] = {
            { .f32 = CLAMP(blend_state->constants[0], 0.0f, 1.0f) },
            { .f32 = CLAMP(blend_state->constants[1], 0.0f, 1.0f) },
            { .f32 = CLAMP(blend_state->constants[2], 0.0f, 1.0f) },
            { .f32 = CLAMP(blend_state->constants[3], 0.0f, 1.0f) },
         };
         NIR_PASS_V(nir, nir_shader_instructions_pass,
                    panvk_inline_blend_constants,
                    nir_metadata_block_index |
                    nir_metadata_dominance,
                    (void *)constants);
      }
   }
}

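/* Create a shader from a SPIR-V module: translate to NIR, run the panvk
 * lowering passes, and compile to a GPU binary with the arch-specific
 * backend compiler.
 */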
struct panvk_shader *
panvk_per_arch(shader_create)(struct panvk_device *dev,
                              gl_shader_stage stage,
                              const VkPipelineShaderStageCreateInfo *stage_info,
                              const struct panvk_pipeline_layout *layout,
                              unsigned sysval_ubo,
                              struct pan_blend_state *blend_state,
                              bool static_blend_constants,
                              const VkAllocationCallbacks *alloc)
{
   const struct panvk_shader_module *module = panvk_shader_module_from_handle(stage_info->module);
   struct panfrost_device *pdev = &dev->physical_device->pdev;
   struct panvk_shader *shader;

   shader = vk_zalloc2(&dev->vk.alloc, alloc, sizeof(*shader), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   util_dynarray_init(&shader->binary, NULL);

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = panvk_spirv_to_nir(module->code,
                                        module->code_size,
                                        stage, stage_info->pName,
                                        stage_info->pSpecializationInfo,
                                        GENX(pan_shader_get_compiler_options)());
   if (!nir) {
      vk_free2(&dev->vk.alloc, alloc, shader);
      return NULL;
   }

   struct panfrost_compile_inputs inputs = {
      .gpu_id = pdev->gpu_id,
      .no_ubo_to_push = true,
      .sysval_ubo = sysval_ubo,
   };

   /* multi-step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out |
              nir_var_system_value | nir_var_mem_shared,
              NULL);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
              nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_indirect_derefs,
              nir_var_shader_in | nir_var_shader_out,
              UINT32_MAX);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   if (stage == MESA_SHADER_FRAGMENT)
      panvk_lower_blend(pdev, nir, &inputs, blend_state, static_blend_constants);

   NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, true, false);
   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);

   nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, stage);
   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   struct panvk_lower_misc_ctx ctx = {
      .shader = shader,
      .layout = layout,
   };
   NIR_PASS_V(nir, panvk_lower_misc, &ctx);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   if (unlikely(dev->physical_device->instance->debug_flags & PANVK_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   GENX(pan_shader_compile)(nir, &inputs, &shader->binary, &shader->info);

   /* Patch the descriptor count */
   shader->info.ubo_count =
      shader->info.sysvals.sysval_count ? sysval_ubo + 1 : layout->num_ubos;
   shader->info.sampler_count = layout->num_samplers;
   shader->info.texture_count = layout->num_textures;

   shader->sysval_ubo = sysval_ubo;

   ralloc_free(nir);

   return shader;
}