/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This is ported mostly out of radeonsi; if we can drop TGSI, we can likely
 * make a lot of this go away.
 */

#include "nir_to_tgsi_info.h"
#include "util/u_math.h"
#include "nir.h"
#include "nir_deref.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_from_mesa.h"

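/* Walk the texture instruction's sources looking for a texture deref and
 * return the variable behind it.  Returns NULL when the texture is not
 * accessed through a deref (i.e. it is referenced by index only).
 */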
static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
      default:
         break;
      }
   }

   return NULL;
}

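/* Return the variable behind the deref in src[0] of a deref-based intrinsic
 * (load_deref, interp_deref_at_*, etc.).
 */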
static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
}


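/* Recursively walk a deref path and accumulate which components of which
 * I/O slots are used.  "mask" holds up to 8 bits (two vec4 slots) so that
 * dual-slot 64-bit accesses can also mark the second slot; non-constant
 * array indices conservatively mark every element of the array.
 */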
static void gather_usage_helper(const nir_deref_instr **deref_ptr,
                                unsigned location,
                                uint8_t mask,
                                uint8_t *usage_mask)
{
   for (; *deref_ptr; deref_ptr++) {
      const nir_deref_instr *deref = *deref_ptr;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         bool is_compact = nir_deref_instr_get_variable(deref)->data.compact;
         unsigned elem_size = is_compact ? DIV_ROUND_UP(glsl_get_length(deref->type), 4) :
            glsl_count_attribute_slots(deref->type, false);
         if (nir_src_is_const(deref->arr.index)) {
            if (is_compact) {
               location += nir_src_as_uint(deref->arr.index) / 4;
               mask <<= nir_src_as_uint(deref->arr.index) % 4;
            } else
               location += elem_size * nir_src_as_uint(deref->arr.index);
         } else {
            unsigned array_elems =
               glsl_get_length(deref_ptr[-1]->type);
            for (unsigned i = 0; i < array_elems; i++) {
               gather_usage_helper(deref_ptr + 1,
                                   location + elem_size * i,
                                   mask, usage_mask);
            }
            return;
         }
         break;
      }
      case nir_deref_type_struct: {
         const struct glsl_type *parent_type =
            deref_ptr[-1]->type;
         unsigned index = deref->strct.index;
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            location += glsl_count_attribute_slots(ft, false);
         }
         break;
      }
      default:
         unreachable("Unhandled deref type in gather_usage_helper");
      }
   }

   usage_mask[location] |= mask & 0xf;
   if (mask & 0xf0)
      usage_mask[location + 1] |= (mask >> 4) & 0xf;
}

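/* Compute the usage mask for one access of an input/output variable:
 * 64-bit types consume two mask bits per component, and the mask is shifted
 * by the variable's location_frac before being accumulated per slot.
 */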
static void gather_usage(const nir_deref_instr *deref,
                         uint8_t mask,
                         uint8_t *usage_mask)
{
   nir_deref_path path;
   nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);

   unsigned location_frac = path.path[0]->var->data.location_frac;
   if (glsl_type_is_64bit(deref->type)) {
      uint8_t new_mask = 0;
      for (unsigned i = 0; i < 4; i++) {
         if (mask & (1 << i))
            new_mask |= 0x3 << (2 * i);
      }
      mask = new_mask << location_frac;
   } else {
      mask <<= location_frac;
      mask &= 0xf;
   }

   gather_usage_helper((const nir_deref_instr **)&path.path[1],
                       path.path[0]->var->data.driver_location,
                       mask, usage_mask);

   nir_deref_path_finish(&path);
}

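/* Record per-input information for a load of a shader input: the component
 * usage mask for fragment shaders, plus color and front-face usage derived
 * from the varying's TGSI semantic.
 */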
static void gather_intrinsic_load_deref_info(const nir_shader *nir,
                                             const nir_intrinsic_instr *instr,
                                             const nir_deref_instr *deref,
                                             bool need_texcoord,
                                             nir_variable *var,
                                             struct tgsi_shader_info *info)
{
   assert(var && var->data.mode == nir_var_shader_in);

   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
                   info->input_usage_mask);

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX: {
      break;
   }
   default: {
      unsigned semantic_name, semantic_index;
      tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
                                   &semantic_name, &semantic_index);

      if (semantic_name == TGSI_SEMANTIC_COLOR) {
         uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
         info->colors_read |= mask << (semantic_index * 4);
      }
      if (semantic_name == TGSI_SEMANTIC_FACE) {
         info->uses_frontface = true;
      }
      break;
   }
   }
}

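/* Scan a single NIR instruction and set the corresponding tgsi_shader_info
 * flags: derivative usage for ALU/tex ops, system-value usage, memory
 * writes, bindless resource usage and interpolation modes.
 */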
static void scan_instruction(const struct nir_shader *nir,
                             bool need_texcoord,
                             struct tgsi_shader_info *info,
                             nir_instr *instr)
{
   if (instr->type == nir_instr_type_alu) {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_fddx:
      case nir_op_fddy:
      case nir_op_fddx_fine:
      case nir_op_fddy_fine:
      case nir_op_fddx_coarse:
      case nir_op_fddy_coarse:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      nir_variable *texture = tex_get_texture_var(tex);

      if (!texture) {
         info->samplers_declared |=
            u_bit_consecutive(tex->sampler_index, 1);
      } else {
         if (texture->data.bindless)
            info->uses_bindless_samplers = true;
      }

      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_front_face:
         info->uses_frontface = 1;
         break;
      case nir_intrinsic_load_instance_id:
         info->uses_instanceid = 1;
         break;
      case nir_intrinsic_load_invocation_id:
         info->uses_invocationid = true;
         break;
      case nir_intrinsic_load_num_workgroups:
         info->uses_grid_size = true;
         break;
      case nir_intrinsic_load_workgroup_size:
         /* The block size is translated to IMM with a fixed block size. */
         if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
            info->uses_block_size = true;
         break;
      case nir_intrinsic_load_local_invocation_id:
      case nir_intrinsic_load_workgroup_id: {
         unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
         while (mask) {
            unsigned i = u_bit_scan(&mask);

            if (intr->intrinsic == nir_intrinsic_load_workgroup_id)
               info->uses_block_id[i] = true;
            else
               info->uses_thread_id[i] = true;
         }
         break;
      }
      case nir_intrinsic_load_vertex_id:
         info->uses_vertexid = 1;
         break;
      case nir_intrinsic_load_vertex_id_zero_base:
         info->uses_vertexid_nobase = 1;
         break;
      case nir_intrinsic_load_base_vertex:
         info->uses_basevertex = 1;
         break;
      case nir_intrinsic_load_draw_id:
         info->uses_drawid = 1;
         break;
      case nir_intrinsic_load_primitive_id:
         info->uses_primid = 1;
         break;
      case nir_intrinsic_load_sample_mask_in:
         info->reads_samplemask = true;
         break;
      case nir_intrinsic_load_tess_level_inner:
      case nir_intrinsic_load_tess_level_outer:
         info->reads_tess_factors = true;
         break;
      case nir_intrinsic_bindless_image_load:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_load = true;
         else
            info->uses_bindless_image_load = true;
         break;
      case nir_intrinsic_bindless_image_size:
      case nir_intrinsic_bindless_image_samples:
         info->uses_bindless_images = true;
         break;
      case nir_intrinsic_bindless_image_store:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_store = true;
         else
            info->uses_bindless_image_store = true;

         info->writes_memory = true;
         break;
      case nir_intrinsic_image_deref_store:
         info->writes_memory = true;
         break;
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
         info->uses_bindless_images = true;

         if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
            info->uses_bindless_buffer_atomic = true;
         else
            info->uses_bindless_image_atomic = true;

         info->writes_memory = true;
         break;
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_comp_swap:
         info->writes_memory = true;
         break;
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_ssbo_atomic_add:
      case nir_intrinsic_ssbo_atomic_imin:
      case nir_intrinsic_ssbo_atomic_umin:
      case nir_intrinsic_ssbo_atomic_imax:
      case nir_intrinsic_ssbo_atomic_umax:
      case nir_intrinsic_ssbo_atomic_and:
      case nir_intrinsic_ssbo_atomic_or:
      case nir_intrinsic_ssbo_atomic_xor:
      case nir_intrinsic_ssbo_atomic_exchange:
      case nir_intrinsic_ssbo_atomic_comp_swap:
         info->writes_memory = true;
         break;
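      /* Loads through derefs: for shader inputs, record indirect addressing,
       * per-component usage and which interpolation paths (center/centroid/
       * sample, perspective or linear) the fragment shader needs.
       */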
      case nir_intrinsic_load_deref: {
         nir_variable *var = intrinsic_get_var(intr);
         nir_variable_mode mode = var->data.mode;
         nir_deref_instr *const deref = nir_src_as_deref(intr->src[0]);
         enum glsl_base_type base_type =
            glsl_get_base_type(glsl_without_array(var->type));

         if (nir_deref_instr_has_indirect(deref)) {
            if (mode == nir_var_shader_in)
               info->indirect_files |= (1 << TGSI_FILE_INPUT);
         }
         if (mode == nir_var_shader_in) {
            gather_intrinsic_load_deref_info(nir, intr, deref, need_texcoord, var, info);

            switch (var->data.interpolation) {
            case INTERP_MODE_NONE:
               if (glsl_base_type_is_integer(base_type))
                  break;

               FALLTHROUGH;
            case INTERP_MODE_SMOOTH:
               if (var->data.sample)
                  info->uses_persp_sample = true;
               else if (var->data.centroid)
                  info->uses_persp_centroid = true;
               else
                  info->uses_persp_center = true;
               break;

            case INTERP_MODE_NOPERSPECTIVE:
               if (var->data.sample)
                  info->uses_linear_sample = true;
               else if (var->data.centroid)
                  info->uses_linear_centroid = true;
               else
                  info->uses_linear_center = true;
               break;
            }
         }
         break;
      }
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset: {
         enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
         switch (interp) {
         case INTERP_MODE_SMOOTH:
         case INTERP_MODE_NONE:
            if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
               info->uses_persp_opcode_interp_centroid = true;
            else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
               info->uses_persp_opcode_interp_sample = true;
            else
               info->uses_persp_opcode_interp_offset = true;
            break;
         case INTERP_MODE_NOPERSPECTIVE:
            if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
               info->uses_linear_opcode_interp_centroid = true;
            else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
               info->uses_linear_opcode_interp_sample = true;
            else
               info->uses_linear_opcode_interp_offset = true;
            break;
         case INTERP_MODE_FLAT:
            break;
         default:
            unreachable("Unsupported interpolation type");
         }
         break;
      }
      default:
         break;
      }
   }
}

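/* Fill in a tgsi_shader_info directly from a NIR shader: stage properties,
 * input/output semantics and interpolation, declared resources, and the
 * per-instruction usage flags gathered by scan_instruction().
 */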
void nir_tgsi_scan_shader(const struct nir_shader *nir,
                          struct tgsi_shader_info *info,
                          bool need_texcoord)
{
   nir_function *func;
   unsigned i;

   info->processor = pipe_shader_type_from_mesa(nir->info.stage);
   info->num_tokens = 2; /* indicate that the shader is non-empty */
   info->num_instructions = 2;

   info->properties[TGSI_PROPERTY_NEXT_SHADER] =
      pipe_shader_type_from_mesa(nir->info.next_stage);

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
         nir->info.vs.window_space_position;
   }

   if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
      info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
         nir->info.tess.tcs_vertices_out;
   }

   if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
      else
         info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;

      STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_ODD);
      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                    PIPE_TESS_SPACING_FRACTIONAL_EVEN);

      info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
      info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
      info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
      info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
      info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
      info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
      info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
         nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
      info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;

      if (nir->info.fs.pixel_center_integer) {
         info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
            TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
      }

      if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
         switch (nir->info.fs.depth_layout) {
         case FRAG_DEPTH_LAYOUT_ANY:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
            break;
         case FRAG_DEPTH_LAYOUT_GREATER:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
            break;
         case FRAG_DEPTH_LAYOUT_LESS:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
            break;
         case FRAG_DEPTH_LAYOUT_UNCHANGED:
            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
            break;
         default:
            unreachable("Unknown depth layout");
         }
      }
   }

   if (gl_shader_stage_is_compute(nir->info.stage)) {
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.workgroup_size[2];
   }

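   /* Assign TGSI input semantics and interpolation modes for each shader
    * input variable.  Slots are indexed by driver_location; compact arrays
    * (e.g. clip/cull distances) occupy DIV_ROUND_UP(length, 4) slots instead
    * of one slot per array element.
    */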
   i = 0;
   uint64_t processed_inputs = 0;
   nir_foreach_shader_in_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
         glsl_count_attribute_slots(type, nir->info.stage == MESA_SHADER_VERTEX);

      i = variable->data.driver_location;

      /* Vertex shader inputs don't have semantics. The state
       * tracker has already mapped them to attributes via
       * variable->data.driver_location.
       */
      if (nir->info.stage == MESA_SHADER_VERTEX) {
         continue;
      }

      for (unsigned j = 0; j < attrib_count; j++, i++) {

         if (processed_inputs & ((uint64_t)1 << i))
            continue;

         processed_inputs |= ((uint64_t)1 << i);

         tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->input_semantic_name[i] = semantic_name;
         info->input_semantic_index[i] = semantic_index;

         if (semantic_name == TGSI_SEMANTIC_PRIMID)
            info->uses_primid = true;

         enum glsl_base_type base_type =
            glsl_get_base_type(glsl_without_array(variable->type));

         if (variable->data.centroid)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
         if (variable->data.sample)
            info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;

         switch (variable->data.interpolation) {
         case INTERP_MODE_NONE:
            if (glsl_base_type_is_integer(base_type)) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
               break;
            }

            if (semantic_name == TGSI_SEMANTIC_COLOR) {
               info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
               break;
            }
            FALLTHROUGH;

         case INTERP_MODE_SMOOTH:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
            break;

         case INTERP_MODE_NOPERSPECTIVE:
            assert(!glsl_base_type_is_integer(base_type));

            info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
            break;

         case INTERP_MODE_FLAT:
            info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
            break;
         }
      }
   }

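   /* With lowered I/O there are no input variables, so the input count and
    * indirect-addressing flag come from the inputs_read bitmasks instead of
    * from the variable walk above.
    */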
   info->num_inputs = nir->num_inputs;
   if (nir->info.io_lowered) {
      info->num_inputs = util_bitcount64(nir->info.inputs_read);
      if (nir->info.inputs_read_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_INPUT;
      info->file_max[TGSI_FILE_INPUT] = info->num_inputs - 1;
   } else {
      int max = info->file_max[TGSI_FILE_INPUT] = -1;
      nir_foreach_shader_in_variable(var, nir) {
         int slots = glsl_count_attribute_slots(var->type, false);
         int tmax = var->data.driver_location + slots - 1;
         if (tmax > max)
            max = tmax;
         info->file_max[TGSI_FILE_INPUT] = max;
      }
   }

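   /* Assign TGSI output semantics, per-component write masks and GS stream
    * assignments for each output variable, and set the "writes_*" flags that
    * drivers key off of.
    */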
   i = 0;
   uint64_t processed_outputs = 0;
   unsigned num_outputs = 0;
   nir_foreach_shader_out_variable(variable, nir) {
      unsigned semantic_name, semantic_index;

      i = variable->data.driver_location;

      const struct glsl_type *type = variable->type;
      if (nir_is_arrayed_io(variable, nir->info.stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
         glsl_count_attribute_slots(type, false);
      for (unsigned k = 0; k < attrib_count; k++, i++) {

         if (nir->info.stage == MESA_SHADER_FRAGMENT) {
            tgsi_get_gl_frag_result_semantic(variable->data.location + k,
                                             &semantic_name, &semantic_index);

            /* Adjust for dual source blending */
            if (variable->data.index > 0) {
               semantic_index++;
            }
         } else {
            tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
                                         &semantic_name, &semantic_index);
         }

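         /* Compute the components written in this slot.  64-bit types take
          * two components each; the second slot of a dual-slot (dvec3/dvec4)
          * output holds whatever does not fit in the first vec4.
          */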
         unsigned num_components = 4;
         unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
         if (vector_elements)
            num_components = vector_elements;

         unsigned component = variable->data.location_frac;
         if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
            if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
               num_components = (num_components * 2) - 4;
               component = 0;
            } else {
               num_components = MIN2(num_components * 2, 4);
            }
         }

         ubyte usagemask = 0;
         for (unsigned j = component; j < num_components + component; j++) {
            switch (j) {
            case 0:
               usagemask |= TGSI_WRITEMASK_X;
               break;
            case 1:
               usagemask |= TGSI_WRITEMASK_Y;
               break;
            case 2:
               usagemask |= TGSI_WRITEMASK_Z;
               break;
            case 3:
               usagemask |= TGSI_WRITEMASK_W;
               break;
            default:
               unreachable("error calculating component index");
            }
         }

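         /* GS vertex streams are packed two bits per component; e.g. stream 2
          * on the y component sets bits 3:2 of gs_out_streams to 2.  When the
          * variable already uses the NIR_STREAM_PACKED encoding, the packed
          * value is taken as-is.
          */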
         unsigned gs_out_streams;
         if (variable->data.stream & NIR_STREAM_PACKED) {
            gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
         } else {
            assert(variable->data.stream < 4);
            gs_out_streams = 0;
            for (unsigned j = 0; j < num_components; ++j)
               gs_out_streams |= variable->data.stream << (2 * (component + j));
         }

         unsigned streamx = gs_out_streams & 3;
         unsigned streamy = (gs_out_streams >> 2) & 3;
         unsigned streamz = (gs_out_streams >> 4) & 3;
         unsigned streamw = (gs_out_streams >> 6) & 3;

         if (usagemask & TGSI_WRITEMASK_X) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_X;
            info->output_streams[i] |= streamx;
            info->num_stream_output_components[streamx]++;
         }
         if (usagemask & TGSI_WRITEMASK_Y) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
            info->output_streams[i] |= streamy << 2;
            info->num_stream_output_components[streamy]++;
         }
         if (usagemask & TGSI_WRITEMASK_Z) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
            info->output_streams[i] |= streamz << 4;
            info->num_stream_output_components[streamz]++;
         }
         if (usagemask & TGSI_WRITEMASK_W) {
            info->output_usagemask[i] |= TGSI_WRITEMASK_W;
            info->output_streams[i] |= streamw << 6;
            info->num_stream_output_components[streamw]++;
         }

         /* make sure we only count this location once against
          * the num_outputs counter.
          */
         if (processed_outputs & ((uint64_t)1 << i))
            continue;

         processed_outputs |= ((uint64_t)1 << i);
         num_outputs++;

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;

         switch (semantic_name) {
         case TGSI_SEMANTIC_PRIMID:
            info->writes_primid = true;
            break;
         case TGSI_SEMANTIC_VIEWPORT_INDEX:
            info->writes_viewport_index = true;
            break;
         case TGSI_SEMANTIC_LAYER:
            info->writes_layer = true;
            break;
         case TGSI_SEMANTIC_PSIZE:
            info->writes_psize = true;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            info->writes_clipvertex = true;
            break;
         case TGSI_SEMANTIC_COLOR:
            info->colors_written |= 1 << semantic_index;
            break;
         case TGSI_SEMANTIC_STENCIL:
            info->writes_stencil = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->writes_samplemask = true;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->writes_edgeflag = true;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (info->processor == PIPE_SHADER_FRAGMENT)
               info->writes_z = true;
            else
               info->writes_position = true;
            break;
         }

         if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
            switch (semantic_name) {
            case TGSI_SEMANTIC_PATCH:
               info->reads_perpatch_outputs = true;
               break;
            case TGSI_SEMANTIC_TESSINNER:
            case TGSI_SEMANTIC_TESSOUTER:
               info->reads_tessfactor_outputs = true;
               break;
            default:
               info->reads_pervertex_outputs = true;
            }
         }
      }

      unsigned loc = variable->data.location;
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          loc == FRAG_RESULT_COLOR &&
          nir->info.outputs_written & (1ull << loc)) {
         assert(attrib_count == 1);
         info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
      }
   }

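   /* With lowered I/O, outputs are enumerated from the outputs_written
    * bitmask; all four components are assumed written since per-slot masks
    * are not tracked here.
    */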
   if (nir->info.io_lowered) {
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         unsigned i = util_bitcount64(nir->info.outputs_written &
                                      BITFIELD64_MASK(location));
         unsigned semantic_name, semantic_index;

         tgsi_get_gl_varying_semantic(location, need_texcoord,
                                      &semantic_name, &semantic_index);

         info->output_semantic_name[i] = semantic_name;
         info->output_semantic_index[i] = semantic_index;
         info->output_usagemask[i] = 0xf;
      }
      num_outputs = util_bitcount64(nir->info.outputs_written);
      if (nir->info.outputs_accessed_indirectly)
         info->indirect_files |= 1 << TGSI_FILE_OUTPUT;
   }

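   /* Build the declared-sampler and declared-image masks from the uniform
    * variables: each variable contributes one bit per sampler/image it
    * contains, starting at its binding point.
    */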
   uint32_t sampler_mask = 0, image_mask = 0;
   nir_foreach_uniform_variable(var, nir) {
      uint32_t sampler_count = glsl_type_get_sampler_count(var->type);
      uint32_t image_count = glsl_type_get_image_count(var->type);
      sampler_mask |= ((1ull << sampler_count) - 1) << var->data.binding;
      image_mask |= ((1ull << image_count) - 1) << var->data.binding;
   }
   info->num_outputs = num_outputs;

   info->const_file_max[0] = nir->num_uniforms - 1;
   info->const_buffers_declared = u_bit_consecutive(1, nir->info.num_ubos);
   if (nir->num_uniforms > 0)
      info->const_buffers_declared |= 1;
   info->images_declared = image_mask;
   info->samplers_declared = sampler_mask;

   info->file_max[TGSI_FILE_SAMPLER] = util_last_bit(info->samplers_declared) - 1;
   info->file_max[TGSI_FILE_SAMPLER_VIEW] = BITSET_LAST_BIT(nir->info.textures_used) - 1;
   info->file_mask[TGSI_FILE_SAMPLER] = info->samplers_declared;
   info->file_mask[TGSI_FILE_SAMPLER_VIEW] = nir->info.textures_used[0];
   info->file_max[TGSI_FILE_IMAGE] = util_last_bit(info->images_declared) - 1;
   info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;

   info->num_written_clipdistance = nir->info.clip_distance_array_size;
   info->num_written_culldistance = nir->info.cull_distance_array_size;
   info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
   info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

   if (info->processor == PIPE_SHADER_FRAGMENT)
      info->uses_kill = nir->info.fs.uses_discard;

   func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
   nir_foreach_block(block, func->impl) {
      nir_foreach_instr(instr, block)
         scan_instruction(nir, need_texcoord, info, instr);
   }
}