1 /*
2  * Copyright © 2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * \file linker.cpp
26  * GLSL linker implementation
27  *
28  * Given a set of shaders that are to be linked to generate a final program,
29  * there are three distinct stages.
30  *
31  * In the first stage shaders are partitioned into groups based on the shader
32  * type.  All shaders of a particular type (e.g., vertex shaders) are linked
33  * together.
34  *
 *   - Undefined references in each shader are resolved to definitions in
 *     another shader.
37  *   - Types and qualifiers of uniforms, outputs, and global variables defined
38  *     in multiple shaders with the same name are verified to be the same.
39  *   - Initializers for uniforms and global variables defined
40  *     in multiple shaders with the same name are verified to be the same.
41  *
42  * The result, in the terminology of the GLSL spec, is a set of shader
43  * executables for each processing unit.
44  *
 * After the first stage is complete, a series of semantic checks is performed
 * on each of the shader executables.
47  *
48  *   - Each shader executable must define a \c main function.
49  *   - Each vertex shader executable must write to \c gl_Position.
50  *   - Each fragment shader executable must write to either \c gl_FragData or
51  *     \c gl_FragColor.
52  *
 * In the final stage, individual shader executables are linked to create a
 * complete executable.
55  *
56  *   - Types of uniforms defined in multiple shader stages with the same name
57  *     are verified to be the same.
58  *   - Initializers for uniforms defined in multiple shader stages with the
59  *     same name are verified to be the same.
60  *   - Types and qualifiers of outputs defined in one stage are verified to
61  *     be the same as the types and qualifiers of inputs defined with the same
62  *     name in a later stage.
63  *
64  * \author Ian Romanick <ian.d.romanick@intel.com>
65  */
66 
67 #include <ctype.h>
68 #include "util/strndup.h"
69 #include "glsl_symbol_table.h"
70 #include "glsl_parser_extras.h"
71 #include "ir.h"
72 #include "program.h"
73 #include "program/prog_instruction.h"
74 #include "program/program.h"
75 #include "util/mesa-sha1.h"
76 #include "util/set.h"
77 #include "string_to_uint_map.h"
78 #include "linker.h"
79 #include "linker_util.h"
80 #include "link_varyings.h"
81 #include "ir_optimization.h"
82 #include "ir_rvalue_visitor.h"
83 #include "ir_uniform.h"
84 #include "builtin_functions.h"
85 #include "shader_cache.h"
86 #include "util/u_string.h"
87 #include "util/u_math.h"
88 
89 
90 #include "main/shaderobj.h"
91 #include "main/enums.h"
92 #include "main/mtypes.h"
93 
94 
95 namespace {
96 
97 struct find_variable {
98    const char *name;
99    bool found;
100 
   find_variable(const char *name) : name(name), found(false) {}
102 };
103 
104 /**
105  * Visitor that determines whether or not a variable is ever written.
 * Note: this only considers whether the variable is statically written,
 * i.e. regardless of the runtime flow of control.
108  *
109  * Use \ref find_assignments for convenience.
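 *
 * A typical use (an illustrative sketch, mirroring the pattern in
 * validate_vertex_shader_executable() below):
 *
 *    find_variable gl_Position("gl_Position");
 *    find_assignments(shader->ir, &gl_Position);
 *    if (gl_Position.found) { ... }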
110  */
111 class find_assignment_visitor : public ir_hierarchical_visitor {
112 public:
   find_assignment_visitor(unsigned num_vars,
                           find_variable * const *vars)
115       : num_variables(num_vars), num_found(0), variables(vars)
116    {
117    }
118 
   virtual ir_visitor_status visit_enter(ir_assignment *ir)
120    {
121       ir_variable *const var = ir->lhs->variable_referenced();
122 
123       return check_variable_name(var->name);
124    }
125 
   virtual ir_visitor_status visit_enter(ir_call *ir)
127    {
128       foreach_two_lists(formal_node, &ir->callee->parameters,
129                         actual_node, &ir->actual_parameters) {
130          ir_rvalue *param_rval = (ir_rvalue *) actual_node;
131          ir_variable *sig_param = (ir_variable *) formal_node;
132 
133          if (sig_param->data.mode == ir_var_function_out ||
134              sig_param->data.mode == ir_var_function_inout) {
135             ir_variable *var = param_rval->variable_referenced();
136             if (var && check_variable_name(var->name) == visit_stop)
137                return visit_stop;
138          }
139       }
140 
141       if (ir->return_deref != NULL) {
142          ir_variable *const var = ir->return_deref->variable_referenced();
143 
144          if (check_variable_name(var->name) == visit_stop)
145             return visit_stop;
146       }
147 
148       return visit_continue_with_parent;
149    }
150 
151 private:
   ir_visitor_status check_variable_name(const char *name)
153    {
154       for (unsigned i = 0; i < num_variables; ++i) {
155          if (strcmp(variables[i]->name, name) == 0) {
156             if (!variables[i]->found) {
157                variables[i]->found = true;
158 
159                assert(num_found < num_variables);
160                if (++num_found == num_variables)
161                   return visit_stop;
162             }
163             break;
164          }
165       }
166 
167       return visit_continue_with_parent;
168    }
169 
170 private:
171    unsigned num_variables;           /**< Number of variables to find */
172    unsigned num_found;               /**< Number of variables already found */
173    find_variable * const *variables; /**< Variables to find */
174 };
175 
176 /**
 * Determine whether or not any variable in the NULL-terminated list of
 * variables is ever written to.
179  */
180 static void
find_assignments(exec_list *ir, find_variable * const *vars)
182 {
183    unsigned num_variables = 0;
184 
185    for (find_variable * const *v = vars; *v; ++v)
186       num_variables++;
187 
188    find_assignment_visitor visitor(num_variables, vars);
189    visitor.run(ir);
190 }
191 
192 /**
193  * Determine whether or not the given variable is ever written to.
194  */
195 static void
find_assignments(exec_list *ir, find_variable *var)
197 {
198    find_assignment_visitor visitor(1, &var);
199    visitor.run(ir);
200 }
201 
202 /**
203  * Visitor that determines whether or not a variable is ever read.
204  */
205 class find_deref_visitor : public ir_hierarchical_visitor {
206 public:
   find_deref_visitor(const char *name)
208       : name(name), found(false)
209    {
210       /* empty */
211    }
212 
   virtual ir_visitor_status visit(ir_dereference_variable *ir)
214    {
215       if (strcmp(this->name, ir->var->name) == 0) {
216          this->found = true;
217          return visit_stop;
218       }
219 
220       return visit_continue;
221    }
222 
   bool variable_found() const
224    {
225       return this->found;
226    }
227 
228 private:
   const char *name;       /**< Find dereferences of a variable with this name. */
   bool found;             /**< Was a dereference of the variable found? */
231 };
232 
233 
234 /**
235  * A visitor helper that provides methods for updating the types of
236  * ir_dereferences.  Classes that update variable types (say, updating
237  * array sizes) will want to use this so that dereference types stay in sync.
238  */
239 class deref_type_updater : public ir_hierarchical_visitor {
240 public:
   virtual ir_visitor_status visit(ir_dereference_variable *ir)
242    {
243       ir->type = ir->var->type;
244       return visit_continue;
245    }
246 
   virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
248    {
249       const glsl_type *const vt = ir->array->type;
250       if (vt->is_array())
251          ir->type = vt->fields.array;
252       return visit_continue;
253    }
254 
   virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
256    {
257       ir->type = ir->record->type->fields.structure[ir->field_idx].type;
258       return visit_continue;
259    }
260 };
261 
262 
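/**
 * Visitor that resizes non-patch shader input arrays (e.g. per-vertex
 * geometry or tessellation inputs) to the number of input vertices
 * determined at link time.  For geometry shaders it raises a link error if
 * the array was declared with a conflicting size or is statically accessed
 * beyond the actual number of input vertices.
 */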
263 class array_resize_visitor : public deref_type_updater {
264 public:
265    using deref_type_updater::visit;
266 
267    unsigned num_vertices;
268    gl_shader_program *prog;
269    gl_shader_stage stage;
270 
   array_resize_visitor(unsigned num_vertices,
                        gl_shader_program *prog,
                        gl_shader_stage stage)
274    {
275       this->num_vertices = num_vertices;
276       this->prog = prog;
277       this->stage = stage;
278    }
279 
   virtual ~array_resize_visitor()
281    {
282       /* empty */
283    }
284 
   virtual ir_visitor_status visit(ir_variable *var)
286    {
287       if (!var->type->is_array() || var->data.mode != ir_var_shader_in ||
288           var->data.patch)
289          return visit_continue;
290 
291       unsigned size = var->type->length;
292 
293       if (stage == MESA_SHADER_GEOMETRY) {
294          /* Generate a link error if the shader has declared this array with
295           * an incorrect size.
296           */
297          if (!var->data.implicit_sized_array &&
298              size && size != this->num_vertices) {
299             linker_error(this->prog, "size of array %s declared as %u, "
300                          "but number of input vertices is %u\n",
301                          var->name, size, this->num_vertices);
302             return visit_continue;
303          }
304 
305          /* Generate a link error if the shader attempts to access an input
306           * array using an index too large for its actual size assigned at
307           * link time.
308           */
309          if (var->data.max_array_access >= (int)this->num_vertices) {
310             linker_error(this->prog, "%s shader accesses element %i of "
311                          "%s, but only %i input vertices\n",
312                          _mesa_shader_stage_to_string(this->stage),
313                          var->data.max_array_access, var->name, this->num_vertices);
314             return visit_continue;
315          }
316       }
317 
318       var->type = glsl_type::get_array_instance(var->type->fields.array,
319                                                 this->num_vertices);
320       var->data.max_array_access = this->num_vertices - 1;
321 
322       return visit_continue;
323    }
324 };
325 
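/**
 * Visitor that replaces ir_unop_implicitly_sized_array_length expressions
 * with an ir_constant holding the array size that is known once implicitly
 * sized arrays have been resized at link time.
 */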
326 class array_length_to_const_visitor : public ir_rvalue_visitor {
327 public:
   array_length_to_const_visitor()
329    {
330       this->progress = false;
331    }
332 
   virtual ~array_length_to_const_visitor()
334    {
335       /* empty */
336    }
337 
338    bool progress;
339 
   virtual void handle_rvalue(ir_rvalue **rvalue)
341    {
342       if (*rvalue == NULL || (*rvalue)->ir_type != ir_type_expression)
343          return;
344 
345       ir_expression *expr = (*rvalue)->as_expression();
346       if (expr) {
347          if (expr->operation == ir_unop_implicitly_sized_array_length) {
348             assert(!expr->operands[0]->type->is_unsized_array());
349             ir_constant *constant = new(expr)
350                ir_constant(expr->operands[0]->type->array_size());
351             if (constant) {
352                *rvalue = constant;
353             }
354          }
355       }
356    }
357 };
358 
359 /**
360  * Visitor that determines the highest stream id to which a (geometry) shader
361  * emits vertices. It also checks whether End{Stream}Primitive is ever called.
362  */
363 class find_emit_vertex_visitor : public ir_hierarchical_visitor {
364 public:
   find_emit_vertex_visitor(int max_allowed)
366       : max_stream_allowed(max_allowed),
367         invalid_stream_id(0),
368         invalid_stream_id_from_emit_vertex(false),
369         end_primitive_found(false),
370         used_streams(0)
371    {
372       /* empty */
373    }
374 
   virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
376    {
377       int stream_id = ir->stream_id();
378 
379       if (stream_id < 0) {
380          invalid_stream_id = stream_id;
381          invalid_stream_id_from_emit_vertex = true;
382          return visit_stop;
383       }
384 
385       if (stream_id > max_stream_allowed) {
386          invalid_stream_id = stream_id;
387          invalid_stream_id_from_emit_vertex = true;
388          return visit_stop;
389       }
390 
391       used_streams |= 1 << stream_id;
392 
393       return visit_continue;
394    }
395 
   virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
397    {
398       end_primitive_found = true;
399 
400       int stream_id = ir->stream_id();
401 
402       if (stream_id < 0) {
403          invalid_stream_id = stream_id;
404          invalid_stream_id_from_emit_vertex = false;
405          return visit_stop;
406       }
407 
408       if (stream_id > max_stream_allowed) {
409          invalid_stream_id = stream_id;
410          invalid_stream_id_from_emit_vertex = false;
411          return visit_stop;
412       }
413 
414       used_streams |= 1 << stream_id;
415 
416       return visit_continue;
417    }
418 
   bool error()
420    {
421       return invalid_stream_id != 0;
422    }
423 
   const char *error_func()
425    {
426       return invalid_stream_id_from_emit_vertex ?
427          "EmitStreamVertex" : "EndStreamPrimitive";
428    }
429 
   int error_stream()
431    {
432       return invalid_stream_id;
433    }
434 
   unsigned active_stream_mask()
436    {
437       return used_streams;
438    }
439 
   bool uses_end_primitive()
441    {
442       return end_primitive_found;
443    }
444 
445 private:
446    int max_stream_allowed;
447    int invalid_stream_id;
448    bool invalid_stream_id_from_emit_vertex;
449    bool end_primitive_found;
450    unsigned used_streams;
451 };
452 
/* Class that finds array derefs and checks if indices are dynamic. */
454 class dynamic_sampler_array_indexing_visitor : public ir_hierarchical_visitor
455 {
456 public:
   dynamic_sampler_array_indexing_visitor() :
458       dynamic_sampler_array_indexing(false)
459    {
460    }
461 
   ir_visitor_status visit_enter(ir_dereference_array *ir)
463    {
464       if (!ir->variable_referenced())
465          return visit_continue;
466 
467       if (!ir->variable_referenced()->type->contains_sampler())
468          return visit_continue;
469 
470       if (!ir->array_index->constant_expression_value(ralloc_parent(ir))) {
471          dynamic_sampler_array_indexing = true;
472          return visit_stop;
473       }
474       return visit_continue;
475    }
476 
   bool uses_dynamic_sampler_array_indexing()
478    {
479       return dynamic_sampler_array_indexing;
480    }
481 
482 private:
483    bool dynamic_sampler_array_indexing;
484 };
485 
486 } /* anonymous namespace */
487 
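/**
 * Append a formatted error message to the program's info log and mark the
 * link as failed.
 */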
488 void
linker_error(gl_shader_program *prog, const char *fmt, ...)
490 {
491    va_list ap;
492 
493    ralloc_strcat(&prog->data->InfoLog, "error: ");
494    va_start(ap, fmt);
495    ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
496    va_end(ap);
497 
498    prog->data->LinkStatus = LINKING_FAILURE;
499 }
500 
501 
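/**
 * Append a formatted warning message to the program's info log.  Unlike
 * linker_error(), this does not change the link status.
 */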
502 void
linker_warning(gl_shader_program *prog, const char *fmt, ...)
504 {
505    va_list ap;
506 
507    ralloc_strcat(&prog->data->InfoLog, "warning: ");
508    va_start(ap, fmt);
509    ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
510    va_end(ap);
511 
512 }
513 
514 
515 /**
516  * Given a string identifying a program resource, break it into a base name
517  * and an optional array index in square brackets.
518  *
519  * If an array index is present, \c out_base_name_end is set to point to the
520  * "[" that precedes the array index, and the array index itself is returned
521  * as a long.
522  *
523  * If no array index is present (or if the array index is negative or
 * malformed), \c out_base_name_end is set to point to the null terminator
525  * at the end of the input string, and -1 is returned.
526  *
527  * Only the final array index is parsed; if the string contains other array
528  * indices (or structure field accesses), they are left in the base name.
529  *
530  * No attempt is made to check that the base name is properly formed;
531  * typically the caller will look up the base name in a hash table, so
532  * ill-formed base names simply turn into hash table lookup failures.
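 *
 * For example (an illustrative call, not taken from an existing caller):
 *
 *    const GLchar *end;
 *    long idx = parse_program_resource_name("palette[12]", 11, &end);
 *    // idx == 12 and end points at the '['
 *
 * whereas parsing "palette" (no array index) returns -1 with \c end pointing
 * at the terminating NUL.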
533  */
534 long
parse_program_resource_name(const GLchar *name,
                            const size_t len,
                            const GLchar **out_base_name_end)
538 {
539    /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
540     *
541     *     "When an integer array element or block instance number is part of
542     *     the name string, it will be specified in decimal form without a "+"
543     *     or "-" sign or any extra leading zeroes. Additionally, the name
544     *     string will not include white space anywhere in the string."
545     */
546 
547    *out_base_name_end = name + len;
548 
549    if (len == 0 || name[len-1] != ']')
550       return -1;
551 
552    /* Walk backwards over the string looking for a non-digit character.  This
553     * had better be the opening bracket for an array index.
554     *
555     * Initially, i specifies the location of the ']'.  Since the string may
    * contain only the ']' character, walk backwards very carefully.
557     */
558    unsigned i;
559    for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
560       /* empty */ ;
561 
562    if ((i == 0) || name[i-1] != '[')
563       return -1;
564 
565    long array_index = strtol(&name[i], NULL, 10);
566    if (array_index < 0)
567       return -1;
568 
569    /* Check for leading zero */
570    if (name[i] == '0' && name[i+1] != ']')
571       return -1;
572 
573    *out_base_name_end = name + (i - 1);
574    return array_index;
575 }
576 
577 
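/**
 * Reset the location of every variable that lacks an explicit location so
 * that the linker can assign locations from scratch, and mark variables as
 * unmatched generic inputs/outputs unless they have an explicit location
 * below VARYING_SLOT_VAR0 (i.e. built-in variables).
 */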
578 void
link_invalidate_variable_locations(exec_list *ir)
580 {
581    foreach_in_list(ir_instruction, node, ir) {
582       ir_variable *const var = node->as_variable();
583 
584       if (var == NULL)
585          continue;
586 
587       /* Only assign locations for variables that lack an explicit location.
588        * Explicit locations are set for all built-in variables, generic vertex
589        * shader inputs (via layout(location=...)), and generic fragment shader
590        * outputs (also via layout(location=...)).
591        */
592       if (!var->data.explicit_location) {
593          var->data.location = -1;
594          var->data.location_frac = 0;
595       }
596 
597       /* ir_variable::is_unmatched_generic_inout is used by the linker while
598        * connecting outputs from one stage to inputs of the next stage.
599        */
600       if (var->data.explicit_location &&
601           var->data.location < VARYING_SLOT_VAR0) {
602          var->data.is_unmatched_generic_inout = 0;
603       } else {
604          var->data.is_unmatched_generic_inout = 1;
605       }
606    }
607 }
608 
609 
610 /**
 * Set clip_distance_array_size and cull_distance_array_size based on the
 * given shader.
 *
 * Also check for errors caused by incorrect usage of gl_ClipVertex,
 * gl_ClipDistance, and gl_CullDistance, and test whether the combined size
 * of the gl_ClipDistance and gl_CullDistance arrays exceeds
 * gl_MaxCombinedClipAndCullDistances.
 *
 * Reports a linker error if any of these checks fail.
620  */
621 static void
analyze_clip_cull_usage(struct gl_shader_program *prog,
                        struct gl_linked_shader *shader,
                        const struct gl_constants *consts,
                        struct shader_info *info)
626 {
627    if (consts->DoDCEBeforeClipCullAnalysis) {
      /* Remove dead functions to avoid raising an error (e.g. a dead function
       * writes to gl_ClipVertex, and main() writes to gl_ClipDistance).
630        */
631       do_dead_functions(shader->ir);
632    }
633 
634    info->clip_distance_array_size = 0;
635    info->cull_distance_array_size = 0;
636 
637    if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
638       /* From section 7.1 (Vertex Shader Special Variables) of the
639        * GLSL 1.30 spec:
640        *
641        *   "It is an error for a shader to statically write both
642        *   gl_ClipVertex and gl_ClipDistance."
643        *
644        * This does not apply to GLSL ES shaders, since GLSL ES defines neither
645        * gl_ClipVertex nor gl_ClipDistance. However with
646        * GL_EXT_clip_cull_distance, this functionality is exposed in ES 3.0.
647        */
648       find_variable gl_ClipDistance("gl_ClipDistance");
649       find_variable gl_CullDistance("gl_CullDistance");
650       find_variable gl_ClipVertex("gl_ClipVertex");
651       find_variable * const variables[] = {
652          &gl_ClipDistance,
653          &gl_CullDistance,
654          !prog->IsES ? &gl_ClipVertex : NULL,
655          NULL
656       };
657       find_assignments(shader->ir, variables);
658 
659       /* From the ARB_cull_distance spec:
660        *
661        * It is a compile-time or link-time error for the set of shaders forming
662        * a program to statically read or write both gl_ClipVertex and either
663        * gl_ClipDistance or gl_CullDistance.
664        *
665        * This does not apply to GLSL ES shaders, since GLSL ES doesn't define
666        * gl_ClipVertex.
667        */
668       if (!prog->IsES) {
669          if (gl_ClipVertex.found && gl_ClipDistance.found) {
670             linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
671                          "and `gl_ClipDistance'\n",
672                          _mesa_shader_stage_to_string(shader->Stage));
673             return;
674          }
675          if (gl_ClipVertex.found && gl_CullDistance.found) {
676             linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
677                          "and `gl_CullDistance'\n",
678                          _mesa_shader_stage_to_string(shader->Stage));
679             return;
680          }
681       }
682 
683       if (gl_ClipDistance.found) {
684          ir_variable *clip_distance_var =
685                 shader->symbols->get_variable("gl_ClipDistance");
686          assert(clip_distance_var);
687          info->clip_distance_array_size = clip_distance_var->type->length;
688       }
689       if (gl_CullDistance.found) {
690          ir_variable *cull_distance_var =
691                 shader->symbols->get_variable("gl_CullDistance");
692          assert(cull_distance_var);
693          info->cull_distance_array_size = cull_distance_var->type->length;
694       }
695       /* From the ARB_cull_distance spec:
696        *
697        * It is a compile-time or link-time error for the set of shaders forming
698        * a program to have the sum of the sizes of the gl_ClipDistance and
699        * gl_CullDistance arrays to be larger than
700        * gl_MaxCombinedClipAndCullDistances.
701        */
702       if ((uint32_t)(info->clip_distance_array_size + info->cull_distance_array_size) >
703           consts->MaxClipPlanes) {
704           linker_error(prog, "%s shader: the combined size of "
                       "'gl_ClipDistance' and 'gl_CullDistance' cannot "
706                        "be larger than "
707                        "gl_MaxCombinedClipAndCullDistances (%u)",
708                        _mesa_shader_stage_to_string(shader->Stage),
709                        consts->MaxClipPlanes);
710       }
711    }
712 }
713 
714 
715 /**
716  * Verify that a vertex shader executable meets all semantic requirements.
717  *
718  * Also sets info.clip_distance_array_size and
719  * info.cull_distance_array_size as a side effect.
720  *
721  * \param shader  Vertex shader executable to be verified
722  */
723 static void
validate_vertex_shader_executable(struct gl_shader_program *prog,
                                  struct gl_linked_shader *shader,
                                  const struct gl_constants *consts)
727 {
728    if (shader == NULL)
729       return;
730 
731    /* From the GLSL 1.10 spec, page 48:
732     *
733     *     "The variable gl_Position is available only in the vertex
734     *      language and is intended for writing the homogeneous vertex
735     *      position. All executions of a well-formed vertex shader
736     *      executable must write a value into this variable. [...] The
737     *      variable gl_Position is available only in the vertex
738     *      language and is intended for writing the homogeneous vertex
739     *      position. All executions of a well-formed vertex shader
740     *      executable must write a value into this variable."
741     *
742     * while in GLSL 1.40 this text is changed to:
743     *
744     *     "The variable gl_Position is available only in the vertex
745     *      language and is intended for writing the homogeneous vertex
746     *      position. It can be written at any time during shader
747     *      execution. It may also be read back by a vertex shader
748     *      after being written. This value will be used by primitive
749     *      assembly, clipping, culling, and other fixed functionality
750     *      operations, if present, that operate on primitives after
751     *      vertex processing has occurred. Its value is undefined if
752     *      the vertex shader executable does not write gl_Position."
753     *
754     * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
755     * gl_Position is not an error.
756     */
757    if (prog->data->Version < (prog->IsES ? 300 : 140)) {
758       find_variable gl_Position("gl_Position");
759       find_assignments(shader->ir, &gl_Position);
760       if (!gl_Position.found) {
761         if (prog->IsES) {
762           linker_warning(prog,
763                          "vertex shader does not write to `gl_Position'. "
764                          "Its value is undefined. \n");
765         } else {
766           linker_error(prog,
767                        "vertex shader does not write to `gl_Position'. \n");
768         }
769          return;
770       }
771    }
772 
773    analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
774 }
775 
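/**
 * Verify that a tessellation evaluation shader executable meets all semantic
 * requirements; currently this only analyzes clip/cull distance usage.
 *
 * Also sets info.clip_distance_array_size and
 * info.cull_distance_array_size as a side effect.
 *
 * \param shader  Tessellation evaluation shader executable to be verified
 */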
776 static void
validate_tess_eval_shader_executable(struct gl_shader_program *prog,
                                     struct gl_linked_shader *shader,
                                     const struct gl_constants *consts)
780 {
781    if (shader == NULL)
782       return;
783 
784    analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
785 }
786 
787 
788 /**
789  * Verify that a fragment shader executable meets all semantic requirements
790  *
791  * \param shader  Fragment shader executable to be verified
792  */
793 static void
validate_fragment_shader_executable(struct gl_shader_program *prog,
                                    struct gl_linked_shader *shader)
796 {
797    if (shader == NULL)
798       return;
799 
800    find_variable gl_FragColor("gl_FragColor");
801    find_variable gl_FragData("gl_FragData");
802    find_variable * const variables[] = { &gl_FragColor, &gl_FragData, NULL };
803    find_assignments(shader->ir, variables);
804 
805    if (gl_FragColor.found && gl_FragData.found) {
806       linker_error(prog,  "fragment shader writes to both "
807                    "`gl_FragColor' and `gl_FragData'\n");
808    }
809 }
810 
811 /**
812  * Verify that a geometry shader executable meets all semantic requirements
813  *
 * Also sets prog->Geom.VerticesIn, info.clip_distance_array_size and
 * info.cull_distance_array_size as a side effect.
816  *
817  * \param shader Geometry shader executable to be verified
818  */
819 static void
validate_geometry_shader_executable(struct gl_shader_program *prog,
                                    struct gl_linked_shader *shader,
                                    const struct gl_constants *consts)
823 {
824    if (shader == NULL)
825       return;
826 
827    unsigned num_vertices =
828       vertices_per_prim(shader->Program->info.gs.input_primitive);
829    prog->Geom.VerticesIn = num_vertices;
830 
831    analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
832 }
833 
834 /**
 * Check whether the geometry shader emits to non-zero streams and perform
 * the corresponding validations.
837  */
838 static void
validate_geometry_shader_emissions(const struct gl_constants *consts,
                                   struct gl_shader_program *prog)
841 {
842    struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
843 
844    if (sh != NULL) {
845       find_emit_vertex_visitor emit_vertex(consts->MaxVertexStreams - 1);
846       emit_vertex.run(sh->ir);
847       if (emit_vertex.error()) {
848          linker_error(prog, "Invalid call %s(%d). Accepted values for the "
849                       "stream parameter are in the range [0, %d].\n",
850                       emit_vertex.error_func(),
851                       emit_vertex.error_stream(),
852                       consts->MaxVertexStreams - 1);
853       }
854       prog->Geom.ActiveStreamMask = emit_vertex.active_stream_mask();
855       prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();
856 
857       /* From the ARB_gpu_shader5 spec:
858        *
859        *   "Multiple vertex streams are supported only if the output primitive
860        *    type is declared to be "points".  A program will fail to link if it
861        *    contains a geometry shader calling EmitStreamVertex() or
       *    EndStreamPrimitive() if its output primitive type is not "points"."
863        *
864        * However, in the same spec:
865        *
866        *   "The function EmitVertex() is equivalent to calling EmitStreamVertex()
867        *    with <stream> set to zero."
868        *
869        * And:
870        *
871        *   "The function EndPrimitive() is equivalent to calling
872        *    EndStreamPrimitive() with <stream> set to zero."
873        *
       * Since we can call EmitVertex() and EndPrimitive() when we output
       * primitives other than points, calling EmitStreamVertex(0) or
       * EndStreamPrimitive(0) should not produce errors. This is also what
       * Nvidia does. We can use prog->Geom.ActiveStreamMask to check whether
       * only the first (zero) stream is active.
880        */
881       if (prog->Geom.ActiveStreamMask & ~(1 << 0) &&
882           sh->Program->info.gs.output_primitive != GL_POINTS) {
883          linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
884                       "with n>0 requires point output\n");
885       }
886    }
887 }
888 
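/**
 * Check whether two intrastage declarations of the same array variable are
 * compatible when at least one of them is implicitly sized.  When they
 * match, the explicitly sized type is kept (propagated to \c existing if
 * needed) and true is returned; a link error is reported if the implicit
 * size conflicts with the recorded maximum array access of the other
 * declaration.
 */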
889 bool
validate_intrastage_arrays(struct gl_shader_program *prog,
                           ir_variable *const var,
                           ir_variable *const existing,
                           bool match_precision)
894 {
895    /* Consider the types to be "the same" if both types are arrays
896     * of the same type and one of the arrays is implicitly sized.
897     * In addition, set the type of the linked variable to the
898     * explicitly sized array.
899     */
900    if (var->type->is_array() && existing->type->is_array()) {
901       const glsl_type *no_array_var = var->type->fields.array;
902       const glsl_type *no_array_existing = existing->type->fields.array;
903       bool type_matches;
904 
905       type_matches = (match_precision ?
906                       no_array_var == no_array_existing :
907                       no_array_var->compare_no_precision(no_array_existing));
908 
909       if (type_matches &&
          ((var->type->length == 0) || (existing->type->length == 0))) {
911          if (var->type->length != 0) {
912             if ((int)var->type->length <= existing->data.max_array_access) {
913                linker_error(prog, "%s `%s' declared as type "
914                            "`%s' but outermost dimension has an index"
915                            " of `%i'\n",
916                            mode_string(var),
917                            var->name, var->type->name,
918                            existing->data.max_array_access);
919             }
920             existing->type = var->type;
921             return true;
922          } else if (existing->type->length != 0) {
            if ((int)existing->type->length <= var->data.max_array_access &&
924                !existing->data.from_ssbo_unsized_array) {
925                linker_error(prog, "%s `%s' declared as type "
926                            "`%s' but outermost dimension has an index"
927                            " of `%i'\n",
928                            mode_string(var),
929                            var->name, existing->type->name,
930                            var->data.max_array_access);
931             }
932             return true;
933          }
934       }
935    }
936    return false;
937 }
938 
939 
940 /**
941  * Perform validation of global variables used across multiple shaders
942  */
943 static void
cross_validate_globals(const struct gl_constants *consts,
                       struct gl_shader_program *prog,
                       struct exec_list *ir, glsl_symbol_table *variables,
                       bool uniforms_only)
948 {
949    foreach_in_list(ir_instruction, node, ir) {
950       ir_variable *const var = node->as_variable();
951 
952       if (var == NULL)
953          continue;
954 
955       if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
956          continue;
957 
958       /* don't cross validate subroutine uniforms */
959       if (var->type->contains_subroutine())
960          continue;
961 
962       /* Don't cross validate interface instances. These are only relevant
963        * inside a shader. The cross validation is done at the Interface Block
964        * name level.
965        */
966       if (var->is_interface_instance())
967          continue;
968 
969       /* Don't cross validate temporaries that are at global scope.  These
       * will eventually get pulled into the shader's 'main'.
971        */
972       if (var->data.mode == ir_var_temporary)
973          continue;
974 
975       /* If a global with this name has already been seen, verify that the
976        * new instance has the same type.  In addition, if the globals have
977        * initializers, the values of the initializers must be the same.
978        */
979       ir_variable *const existing = variables->get_variable(var->name);
980       if (existing != NULL) {
981          /* Check if types match. */
982          if (var->type != existing->type) {
983             if (!validate_intrastage_arrays(prog, var, existing)) {
               /* If it is an unsized array in a Shader Storage Block,
                * two different shaders can access different elements.
                * Because of that, they might be converted to arrays of
                * different sizes; in that case check that they are
                * compatible, ignoring the array size.
989                 */
990                if (!(var->data.mode == ir_var_shader_storage &&
991                      var->data.from_ssbo_unsized_array &&
992                      existing->data.mode == ir_var_shader_storage &&
993                      existing->data.from_ssbo_unsized_array &&
994                      var->type->gl_type == existing->type->gl_type)) {
995                   linker_error(prog, "%s `%s' declared as type "
996                                  "`%s' and type `%s'\n",
997                                  mode_string(var),
998                                  var->name, var->type->name,
999                                  existing->type->name);
1000                   return;
1001                }
1002             }
1003          }
1004 
1005          if (var->data.explicit_location) {
1006             if (existing->data.explicit_location
1007                 && (var->data.location != existing->data.location)) {
1008                linker_error(prog, "explicit locations for %s "
1009                             "`%s' have differing values\n",
1010                             mode_string(var), var->name);
1011                return;
1012             }
1013 
1014             if (var->data.location_frac != existing->data.location_frac) {
1015                linker_error(prog, "explicit components for %s `%s' have "
1016                             "differing values\n", mode_string(var), var->name);
1017                return;
1018             }
1019 
1020             existing->data.location = var->data.location;
1021             existing->data.explicit_location = true;
1022          } else {
            /* Check if a uniform with an implicit location was marked
             * explicit by an earlier shader stage. If so, mark it explicit
             * in this stage too, to make sure later processing does not
             * treat it as an implicit one.
1027              */
1028             if (existing->data.explicit_location) {
1029                var->data.location = existing->data.location;
1030                var->data.explicit_location = true;
1031             }
1032          }
1033 
1034          /* From the GLSL 4.20 specification:
1035           * "A link error will result if two compilation units in a program
1036           *  specify different integer-constant bindings for the same
1037           *  opaque-uniform name.  However, it is not an error to specify a
1038           *  binding on some but not all declarations for the same name"
1039           */
1040          if (var->data.explicit_binding) {
1041             if (existing->data.explicit_binding &&
1042                 var->data.binding != existing->data.binding) {
1043                linker_error(prog, "explicit bindings for %s "
1044                             "`%s' have differing values\n",
1045                             mode_string(var), var->name);
1046                return;
1047             }
1048 
1049             existing->data.binding = var->data.binding;
1050             existing->data.explicit_binding = true;
1051          }
1052 
1053          if (var->type->contains_atomic() &&
1054              var->data.offset != existing->data.offset) {
1055             linker_error(prog, "offset specifications for %s "
1056                          "`%s' have differing values\n",
1057                          mode_string(var), var->name);
1058             return;
1059          }
1060 
1061          /* Validate layout qualifiers for gl_FragDepth.
1062           *
1063           * From the AMD/ARB_conservative_depth specs:
1064           *
1065           *    "If gl_FragDepth is redeclared in any fragment shader in a
1066           *    program, it must be redeclared in all fragment shaders in
1067           *    that program that have static assignments to
1068           *    gl_FragDepth. All redeclarations of gl_FragDepth in all
1069           *    fragment shaders in a single program must have the same set
1070           *    of qualifiers."
1071           */
1072          if (strcmp(var->name, "gl_FragDepth") == 0) {
1073             bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
1074             bool layout_differs =
1075                var->data.depth_layout != existing->data.depth_layout;
1076 
1077             if (layout_declared && layout_differs) {
1078                linker_error(prog,
1079                             "All redeclarations of gl_FragDepth in all "
1080                             "fragment shaders in a single program must have "
1081                             "the same set of qualifiers.\n");
1082             }
1083 
1084             if (var->data.used && layout_differs) {
1085                linker_error(prog,
1086                             "If gl_FragDepth is redeclared with a layout "
1087                             "qualifier in any fragment shader, it must be "
1088                             "redeclared with the same layout qualifier in "
1089                             "all fragment shaders that have assignments to "
1090                             "gl_FragDepth\n");
1091             }
1092          }
1093 
1094          /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
1095           *
1096           *     "If a shared global has multiple initializers, the
1097           *     initializers must all be constant expressions, and they
1098           *     must all have the same value. Otherwise, a link error will
1099           *     result. (A shared global having only one initializer does
1100           *     not require that initializer to be a constant expression.)"
1101           *
1102           * Previous to 4.20 the GLSL spec simply said that initializers
1103           * must have the same value.  In this case of non-constant
1104           * initializers, this was impossible to determine.  As a result,
1105           * no vendor actually implemented that behavior.  The 4.20
1106           * behavior matches the implemented behavior of at least one other
1107           * vendor, so we'll implement that for all GLSL versions.
1108           * If (at least) one of these constant expressions is implicit,
1109           * because it was added by glsl_zero_init, we skip the verification.
1110           */
1111          if (var->constant_initializer != NULL) {
1112             if (existing->constant_initializer != NULL &&
1113                 !existing->data.is_implicit_initializer &&
1114                 !var->data.is_implicit_initializer) {
1115                if (!var->constant_initializer->has_value(existing->constant_initializer)) {
1116                   linker_error(prog, "initializers for %s "
1117                                "`%s' have differing values\n",
1118                                mode_string(var), var->name);
1119                   return;
1120                }
1121             } else {
1122                /* If the first-seen instance of a particular uniform did
1123                 * not have an initializer but a later instance does,
1124                 * replace the former with the later.
                * replace the former with the latter.
1126                if (!var->data.is_implicit_initializer)
1127                   variables->replace_variable(existing->name, var);
1128             }
1129          }
1130 
1131          if (var->data.has_initializer) {
1132             if (existing->data.has_initializer
1133                 && (var->constant_initializer == NULL
1134                     || existing->constant_initializer == NULL)) {
1135                linker_error(prog,
1136                             "shared global variable `%s' has multiple "
1137                             "non-constant initializers.\n",
1138                             var->name);
1139                return;
1140             }
1141          }
1142 
1143          if (existing->data.explicit_invariant != var->data.explicit_invariant) {
1144             linker_error(prog, "declarations for %s `%s' have "
1145                          "mismatching invariant qualifiers\n",
1146                          mode_string(var), var->name);
1147             return;
1148          }
1149          if (existing->data.centroid != var->data.centroid) {
1150             linker_error(prog, "declarations for %s `%s' have "
1151                          "mismatching centroid qualifiers\n",
1152                          mode_string(var), var->name);
1153             return;
1154          }
1155          if (existing->data.sample != var->data.sample) {
1156             linker_error(prog, "declarations for %s `%s` have "
1157                          "mismatching sample qualifiers\n",
1158                          mode_string(var), var->name);
1159             return;
1160          }
1161          if (existing->data.image_format != var->data.image_format) {
1162             linker_error(prog, "declarations for %s `%s` have "
1163                          "mismatching image format qualifiers\n",
1164                          mode_string(var), var->name);
1165             return;
1166          }
1167 
         /* Check that the precision qualifiers match for uniform variables
          * on GLSL ES.
1170           */
1171          if (!consts->AllowGLSLRelaxedES &&
1172              prog->IsES && !var->get_interface_type() &&
1173              existing->data.precision != var->data.precision) {
1174             if ((existing->data.used && var->data.used) || prog->data->Version >= 300) {
1175                linker_error(prog, "declarations for %s `%s` have "
1176                             "mismatching precision qualifiers\n",
1177                             mode_string(var), var->name);
1178                return;
1179             } else {
1180                linker_warning(prog, "declarations for %s `%s` have "
1181                               "mismatching precision qualifiers\n",
1182                               mode_string(var), var->name);
1183             }
1184          }
1185 
1186          /* In OpenGL GLSL 3.20 spec, section 4.3.9:
1187           *
1188           *   "It is a link-time error if any particular shader interface
1189           *    contains:
1190           *
1191           *    - two different blocks, each having no instance name, and each
1192           *      having a member of the same name, or
1193           *
1194           *    - a variable outside a block, and a block with no instance name,
1195           *      where the variable has the same name as a member in the block."
1196           */
1197          const glsl_type *var_itype = var->get_interface_type();
1198          const glsl_type *existing_itype = existing->get_interface_type();
1199          if (var_itype != existing_itype) {
1200             if (!var_itype || !existing_itype) {
1201                linker_error(prog, "declarations for %s `%s` are inside block "
1202                             "`%s` and outside a block",
1203                             mode_string(var), var->name,
1204                             var_itype ? var_itype->name : existing_itype->name);
1205                return;
1206             } else if (strcmp(var_itype->name, existing_itype->name) != 0) {
1207                linker_error(prog, "declarations for %s `%s` are inside blocks "
1208                             "`%s` and `%s`",
1209                             mode_string(var), var->name,
1210                             existing_itype->name,
1211                             var_itype->name);
1212                return;
1213             }
1214          }
1215       } else
1216          variables->add_variable(var);
1217    }
1218 }
1219 
1220 
1221 /**
1222  * Perform validation of uniforms used across multiple shader stages
1223  */
1224 static void
cross_validate_uniforms(const struct gl_constants *consts,
                        struct gl_shader_program *prog)
1227 {
1228    glsl_symbol_table variables;
1229    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1230       if (prog->_LinkedShaders[i] == NULL)
1231          continue;
1232 
1233       cross_validate_globals(consts, prog, prog->_LinkedShaders[i]->ir,
1234                              &variables, true);
1235    }
1236 }
1237 
1238 /**
1239  * Accumulates the array of buffer blocks and checks that all definitions of
1240  * blocks agree on their contents.
1241  */
1242 static bool
interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
                                         bool validate_ssbo)
1245 {
1246    int *ifc_blk_stage_idx[MESA_SHADER_STAGES];
1247    struct gl_uniform_block *blks = NULL;
1248    unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
1249       &prog->data->NumUniformBlocks;
1250 
1251    unsigned max_num_buffer_blocks = 0;
1252    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1253       if (prog->_LinkedShaders[i]) {
1254          if (validate_ssbo) {
1255             max_num_buffer_blocks +=
1256                prog->_LinkedShaders[i]->Program->info.num_ssbos;
1257          } else {
1258             max_num_buffer_blocks +=
1259                prog->_LinkedShaders[i]->Program->info.num_ubos;
1260          }
1261       }
1262    }
1263 
1264    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1265       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1266 
1267       ifc_blk_stage_idx[i] =
1268          (int *) malloc(sizeof(int) * max_num_buffer_blocks);
1269       for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
1270          ifc_blk_stage_idx[i][j] = -1;
1271 
1272       if (sh == NULL)
1273          continue;
1274 
1275       unsigned sh_num_blocks;
1276       struct gl_uniform_block **sh_blks;
1277       if (validate_ssbo) {
1278          sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
1279          sh_blks = sh->Program->sh.ShaderStorageBlocks;
1280       } else {
1281          sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
1282          sh_blks = sh->Program->sh.UniformBlocks;
1283       }
1284 
1285       for (unsigned int j = 0; j < sh_num_blocks; j++) {
1286          int index = link_cross_validate_uniform_block(prog->data, &blks,
1287                                                        num_blks, sh_blks[j]);
1288 
1289          if (index == -1) {
1290             linker_error(prog, "buffer block `%s' has mismatching "
1291                          "definitions\n", sh_blks[j]->name.string);
1292 
1293             for (unsigned k = 0; k <= i; k++) {
1294                free(ifc_blk_stage_idx[k]);
1295             }
1296 
1297             /* Reset the block count. This will help avoid various segfaults
1298              * from api calls that assume the array exists due to the count
1299              * being non-zero.
1300              */
1301             *num_blks = 0;
1302             return false;
1303          }
1304 
1305          ifc_blk_stage_idx[i][index] = j;
1306       }
1307    }
1308 
1309    /* Update per stage block pointers to point to the program list.
1310     * FIXME: We should be able to free the per stage blocks here.
1311     */
1312    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1313       for (unsigned j = 0; j < *num_blks; j++) {
1314          int stage_index = ifc_blk_stage_idx[i][j];
1315 
1316          if (stage_index != -1) {
1317             struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1318 
1319             struct gl_uniform_block **sh_blks = validate_ssbo ?
1320                sh->Program->sh.ShaderStorageBlocks :
1321                sh->Program->sh.UniformBlocks;
1322 
1323             blks[j].stageref |= sh_blks[stage_index]->stageref;
1324             sh_blks[stage_index] = &blks[j];
1325          }
1326       }
1327    }
1328 
1329    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1330       free(ifc_blk_stage_idx[i]);
1331    }
1332 
1333    if (validate_ssbo)
1334       prog->data->ShaderStorageBlocks = blks;
1335    else
1336       prog->data->UniformBlocks = blks;
1337 
1338    return true;
1339 }
1340 
1341 /**
1342  * Verifies the invariance of built-in special variables.
1343  */
1344 static bool
validate_invariant_builtins(struct gl_shader_program *prog,
                            const gl_linked_shader *vert,
                            const gl_linked_shader *frag)
1348 {
1349    const ir_variable *var_vert;
1350    const ir_variable *var_frag;
1351 
1352    if (!vert || !frag)
1353       return true;
1354 
1355    /*
1356     * From OpenGL ES Shading Language 1.0 specification
1357     * (4.6.4 Invariance and Linkage):
1358     *     "The invariance of varyings that are declared in both the vertex and
1359     *     fragment shaders must match. For the built-in special variables,
1360     *     gl_FragCoord can only be declared invariant if and only if
1361     *     gl_Position is declared invariant. Similarly gl_PointCoord can only
1362     *     be declared invariant if and only if gl_PointSize is declared
1363     *     invariant. It is an error to declare gl_FrontFacing as invariant.
1364     *     The invariance of gl_FrontFacing is the same as the invariance of
1365     *     gl_Position."
1366     */
1367    var_frag = frag->symbols->get_variable("gl_FragCoord");
1368    if (var_frag && var_frag->data.invariant) {
1369       var_vert = vert->symbols->get_variable("gl_Position");
1370       if (var_vert && !var_vert->data.invariant) {
1371          linker_error(prog,
1372                "fragment shader built-in `%s' has invariant qualifier, "
1373                "but vertex shader built-in `%s' lacks invariant qualifier\n",
1374                var_frag->name, var_vert->name);
1375          return false;
1376       }
1377    }
1378 
1379    var_frag = frag->symbols->get_variable("gl_PointCoord");
1380    if (var_frag && var_frag->data.invariant) {
1381       var_vert = vert->symbols->get_variable("gl_PointSize");
1382       if (var_vert && !var_vert->data.invariant) {
1383          linker_error(prog,
1384                "fragment shader built-in `%s' has invariant qualifier, "
1385                "but vertex shader built-in `%s' lacks invariant qualifier\n",
1386                var_frag->name, var_vert->name);
1387          return false;
1388       }
1389    }
1390 
1391    var_frag = frag->symbols->get_variable("gl_FrontFacing");
1392    if (var_frag && var_frag->data.invariant) {
1393       linker_error(prog,
1394             "fragment shader built-in `%s' can not be declared as invariant\n",
1395             var_frag->name);
1396       return false;
1397    }
1398 
1399    return true;
1400 }
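
/* Illustrative GLSL ES 1.00 sketch (not part of this file) of a shader pair
 * the check above rejects: the fragment shader declares gl_FragCoord
 * invariant while the vertex shader does not declare gl_Position invariant.
 *
 *    // vertex shader
 *    void main() { gl_Position = vec4(0.0); }
 *
 *    // fragment shader
 *    invariant gl_FragCoord;   // error: gl_Position is not invariant
 *    void main() { gl_FragColor = gl_FragCoord; }
 */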
1401 
1402 /**
1403  * Populates a shader's symbol table with all global declarations.
1404  */
1405 static void
1406 populate_symbol_table(gl_linked_shader *sh, glsl_symbol_table *symbols)
1407 {
1408    sh->symbols = new(sh) glsl_symbol_table;
1409 
1410    _mesa_glsl_copy_symbols_from_table(sh->ir, symbols, sh->symbols);
1411 }
1412 
1413 
1414 /**
1415  * Remap variables referenced in an instruction tree
1416  *
1417  * This is used when instruction trees are cloned from one shader and placed in
1418  * another.  These trees will contain references to \c ir_variable nodes that
1419  * do not exist in the target shader.  This function finds these \c ir_variable
1420  * references and replaces the references with matching variables in the target
1421  * shader.
1422  *
1423  * If there is no matching variable in the target shader, a clone of the
1424  * \c ir_variable is made and added to the target shader.  The new variable is
1425  * added to \b both the instruction stream and the symbol table.
1426  *
1427  * \param inst   IR tree that is to be processed.
1428  * \param target Linked shader whose symbol table and instruction stream
1429  *               receive any newly created variables.
1430  * \param temps  Hash table mapping temporary variables in the source
1431  *               shader to their clones in the target shader.
1432  */
1433 static void
1434 remap_variables(ir_instruction *inst, struct gl_linked_shader *target,
1435                 hash_table *temps)
1436 {
1437    class remap_visitor : public ir_hierarchical_visitor {
1438    public:
1439          remap_visitor(struct gl_linked_shader *target, hash_table *temps)
1440       {
1441          this->target = target;
1442          this->symbols = target->symbols;
1443          this->instructions = target->ir;
1444          this->temps = temps;
1445       }
1446 
1447       virtual ir_visitor_status visit(ir_dereference_variable *ir)
1448       {
1449          if (ir->var->data.mode == ir_var_temporary) {
1450             hash_entry *entry = _mesa_hash_table_search(temps, ir->var);
1451             ir_variable *var = entry ? (ir_variable *) entry->data : NULL;
1452 
1453             assert(var != NULL);
1454             ir->var = var;
1455             return visit_continue;
1456          }
1457 
1458          ir_variable *const existing =
1459             this->symbols->get_variable(ir->var->name);
1460          if (existing != NULL)
1461             ir->var = existing;
1462          else {
1463             ir_variable *copy = ir->var->clone(this->target, NULL);
1464 
1465             this->symbols->add_variable(copy);
1466             this->instructions->push_head(copy);
1467             ir->var = copy;
1468          }
1469 
1470          return visit_continue;
1471       }
1472 
1473    private:
1474       struct gl_linked_shader *target;
1475       glsl_symbol_table *symbols;
1476       exec_list *instructions;
1477       hash_table *temps;
1478    };
1479 
1480    remap_visitor v(target, temps);
1481 
1482    inst->accept(&v);
1483 }
1484 
1485 
1486 /**
1487  * Move non-declarations from one instruction stream to another
1488  *
1489  * The intended usage pattern of this function is to pass the pointer to the
1490  * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
1491  * pointer) for \c last and \c false for \c make_copies on the first
1492  * call.  Successive calls pass the return value of the previous call for
1493  * \c last and \c true for \c make_copies.
1494  *
1495  * \param instructions Source instruction stream
1496  * \param last         Instruction after which new instructions should be
1497  *                     inserted in the target instruction stream
1498  * \param make_copies  Flag selecting whether instructions in \c instructions
1499  *                     should be copied (via \c ir_instruction::clone) or moved.
1500  * \param target       Linked shader that receives the instructions.
1501  *
1502  * \return
1503  * The new "last" instruction in the target instruction stream.  This pointer
1504  * is suitable for use as the \c last parameter of a later call to this
1505  * function.
1506  */
1507 static exec_node *
1508 move_non_declarations(exec_list *instructions, exec_node *last,
1509                       bool make_copies, gl_linked_shader *target)
1510 {
1511    hash_table *temps = NULL;
1512 
1513    if (make_copies)
1514       temps = _mesa_pointer_hash_table_create(NULL);
1515 
1516    foreach_in_list_safe(ir_instruction, inst, instructions) {
1517       if (inst->as_function())
1518          continue;
1519 
1520       ir_variable *var = inst->as_variable();
1521       if ((var != NULL) && (var->data.mode != ir_var_temporary))
1522          continue;
1523 
1524       assert(inst->as_assignment()
1525              || inst->as_call()
1526              || inst->as_if() /* for initializers with the ?: operator */
1527              || ((var != NULL) && (var->data.mode == ir_var_temporary)));
1528 
1529       if (make_copies) {
1530          inst = inst->clone(target, NULL);
1531 
1532          if (var != NULL)
1533             _mesa_hash_table_insert(temps, var, inst);
1534          else
1535             remap_variables(inst, target, temps);
1536       } else {
1537          inst->remove();
1538       }
1539 
1540       last->insert_after(inst);
1541       last = inst;
1542    }
1543 
1544    if (make_copies)
1545       _mesa_hash_table_destroy(temps, NULL);
1546 
1547    return last;
1548 }
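
/* A minimal sketch of the calling pattern described in the comment above,
 * mirroring how link_intrastage_shaders() later in this file drives this
 * helper: the first call moves instructions from the shader containing
 * main, and each later call clones from the remaining shaders.
 *
 *    exec_node *insertion_point =
 *       move_non_declarations(linked->ir, &main_sig->body.head_sentinel,
 *                             false, linked);
 *    for (unsigned i = 0; i < num_shaders; i++) {
 *       if (shader_list[i] == main)
 *          continue;
 *       insertion_point = move_non_declarations(shader_list[i]->ir,
 *                                               insertion_point, true, linked);
 *    }
 */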
1549 
1550 
1551 /**
1552  * This class is only used in link_intrastage_shaders() below but declaring
1553  * it inside that function leads to compiler warnings with some versions of
1554  * gcc.
1555  */
1556 class array_sizing_visitor : public deref_type_updater {
1557 public:
1558    using deref_type_updater::visit;
1559 
1560    array_sizing_visitor()
1561       : mem_ctx(ralloc_context(NULL)),
1562         unnamed_interfaces(_mesa_pointer_hash_table_create(NULL))
1563    {
1564    }
1565 
1566    ~array_sizing_visitor()
1567    {
1568       _mesa_hash_table_destroy(this->unnamed_interfaces, NULL);
1569       ralloc_free(this->mem_ctx);
1570    }
1571 
1572    virtual ir_visitor_status visit(ir_variable *var)
1573    {
1574       const glsl_type *type_without_array;
1575       bool implicit_sized_array = var->data.implicit_sized_array;
1576       fixup_type(&var->type, var->data.max_array_access,
1577                  var->data.from_ssbo_unsized_array,
1578                  &implicit_sized_array);
1579       var->data.implicit_sized_array = implicit_sized_array;
1580       type_without_array = var->type->without_array();
1581       if (var->type->is_interface()) {
1582          if (interface_contains_unsized_arrays(var->type)) {
1583             const glsl_type *new_type =
1584                resize_interface_members(var->type,
1585                                         var->get_max_ifc_array_access(),
1586                                         var->is_in_shader_storage_block());
1587             var->type = new_type;
1588             var->change_interface_type(new_type);
1589          }
1590       } else if (type_without_array->is_interface()) {
1591          if (interface_contains_unsized_arrays(type_without_array)) {
1592             const glsl_type *new_type =
1593                resize_interface_members(type_without_array,
1594                                         var->get_max_ifc_array_access(),
1595                                         var->is_in_shader_storage_block());
1596             var->change_interface_type(new_type);
1597             var->type = update_interface_members_array(var->type, new_type);
1598          }
1599       } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1600          /* Store a pointer to the variable in the unnamed_interfaces
1601           * hashtable.
1602           */
1603          hash_entry *entry =
1604                _mesa_hash_table_search(this->unnamed_interfaces,
1605                                        ifc_type);
1606 
1607          ir_variable **interface_vars = entry ? (ir_variable **) entry->data : NULL;
1608 
1609          if (interface_vars == NULL) {
1610             interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1611                                            ifc_type->length);
1612             _mesa_hash_table_insert(this->unnamed_interfaces, ifc_type,
1613                                     interface_vars);
1614          }
1615          unsigned index = ifc_type->field_index(var->name);
1616          assert(index < ifc_type->length);
1617          assert(interface_vars[index] == NULL);
1618          interface_vars[index] = var;
1619       }
1620       return visit_continue;
1621    }
1622 
1623    /**
1624     * For each unnamed interface block that was discovered while running the
1625     * visitor, adjust the interface type to reflect the newly assigned array
1626     * sizes, and fix up the ir_variable nodes to point to the new interface
1627     * type.
1628     */
1629    void fixup_unnamed_interface_types()
1630    {
1631       hash_table_call_foreach(this->unnamed_interfaces,
1632                               fixup_unnamed_interface_type, NULL);
1633    }
1634 
1635 private:
1636    /**
1637     * If the type pointed to by \c type represents an unsized array, replace
1638     * it with a sized array whose size is determined by max_array_access.
1639     */
1640    static void fixup_type(const glsl_type **type, unsigned max_array_access,
1641                           bool from_ssbo_unsized_array, bool *implicit_sized)
1642    {
1643       if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
1644          *type = glsl_type::get_array_instance((*type)->fields.array,
1645                                                max_array_access + 1);
1646          *implicit_sized = true;
1647          assert(*type != NULL);
1648       }
1649    }
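
   /* Worked example (illustrative, not taken from a real shader): a
    * declaration `float weights[];` whose highest recorded access is
    * weights[3] reaches this helper with max_array_access == 3, so the type
    * becomes float[4] and the array is flagged as implicitly sized.
    */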
1650 
1651    static const glsl_type *
1652    update_interface_members_array(const glsl_type *type,
1653                                   const glsl_type *new_interface_type)
1654    {
1655       const glsl_type *element_type = type->fields.array;
1656       if (element_type->is_array()) {
1657          const glsl_type *new_array_type =
1658             update_interface_members_array(element_type, new_interface_type);
1659          return glsl_type::get_array_instance(new_array_type, type->length);
1660       } else {
1661          return glsl_type::get_array_instance(new_interface_type,
1662                                               type->length);
1663       }
1664    }
1665 
1666    /**
1667     * Determine whether the given interface type contains unsized arrays (if
1668     * it doesn't, array_sizing_visitor doesn't need to process it).
1669     */
1670    static bool interface_contains_unsized_arrays(const glsl_type *type)
1671    {
1672       for (unsigned i = 0; i < type->length; i++) {
1673          const glsl_type *elem_type = type->fields.structure[i].type;
1674          if (elem_type->is_unsized_array())
1675             return true;
1676       }
1677       return false;
1678    }
1679 
1680    /**
1681     * Create a new interface type based on the given type, with unsized arrays
1682     * replaced by sized arrays whose size is determined by
1683     * max_ifc_array_access.
1684     */
1685    static const glsl_type *
1686    resize_interface_members(const glsl_type *type,
1687                             const int *max_ifc_array_access,
1688                             bool is_ssbo)
1689    {
1690       unsigned num_fields = type->length;
1691       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1692       memcpy(fields, type->fields.structure,
1693              num_fields * sizeof(*fields));
1694       for (unsigned i = 0; i < num_fields; i++) {
1695          bool implicit_sized_array = fields[i].implicit_sized_array;
1696          /* If the last member of an SSBO is an unsized array, don't replace
1697           * it with a sized array.
1698           */
1699          if (is_ssbo && i == (num_fields - 1))
1700             fixup_type(&fields[i].type, max_ifc_array_access[i],
1701                        true, &implicit_sized_array);
1702          else
1703             fixup_type(&fields[i].type, max_ifc_array_access[i],
1704                        false, &implicit_sized_array);
1705          fields[i].implicit_sized_array = implicit_sized_array;
1706       }
1707       glsl_interface_packing packing =
1708          (glsl_interface_packing) type->interface_packing;
1709       bool row_major = (bool) type->interface_row_major;
1710       const glsl_type *new_ifc_type =
1711          glsl_type::get_interface_instance(fields, num_fields,
1712                                            packing, row_major, type->name);
1713       delete [] fields;
1714       return new_ifc_type;
1715    }
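
   /* Illustrative GLSL sketch (not from this file): for a shader storage
    * block whose last member is a run-time sized array, only that member is
    * exempted from the sizing performed above.
    *
    *    buffer Particles {
    *       uint count;
    *       vec4 position[];   // last SSBO member: left unsized
    *    };
    */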
1716 
1717    static void fixup_unnamed_interface_type(const void *key, void *data,
1718                                             void *)
1719    {
1720       const glsl_type *ifc_type = (const glsl_type *) key;
1721       ir_variable **interface_vars = (ir_variable **) data;
1722       unsigned num_fields = ifc_type->length;
1723       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1724       memcpy(fields, ifc_type->fields.structure,
1725              num_fields * sizeof(*fields));
1726       bool interface_type_changed = false;
1727       for (unsigned i = 0; i < num_fields; i++) {
1728          if (interface_vars[i] != NULL &&
1729              fields[i].type != interface_vars[i]->type) {
1730             fields[i].type = interface_vars[i]->type;
1731             interface_type_changed = true;
1732          }
1733       }
1734       if (!interface_type_changed) {
1735          delete [] fields;
1736          return;
1737       }
1738       glsl_interface_packing packing =
1739          (glsl_interface_packing) ifc_type->interface_packing;
1740       bool row_major = (bool) ifc_type->interface_row_major;
1741       const glsl_type *new_ifc_type =
1742          glsl_type::get_interface_instance(fields, num_fields, packing,
1743                                            row_major, ifc_type->name);
1744       delete [] fields;
1745       for (unsigned i = 0; i < num_fields; i++) {
1746          if (interface_vars[i] != NULL)
1747             interface_vars[i]->change_interface_type(new_ifc_type);
1748       }
1749    }
1750 
1751    /**
1752     * Memory context used to allocate the data in \c unnamed_interfaces.
1753     */
1754    void *mem_ctx;
1755 
1756    /**
1757     * Hash table from const glsl_type * to an array of ir_variable *'s
1758     * pointing to the ir_variables constituting each unnamed interface block.
1759     */
1760    hash_table *unnamed_interfaces;
1761 };
1762 
1763 static bool
1764 validate_xfb_buffer_stride(const struct gl_constants *consts, unsigned idx,
1765                            struct gl_shader_program *prog)
1766 {
1767    /* We will validate doubles at a later stage */
1768    if (prog->TransformFeedback.BufferStride[idx] % 4) {
1769       linker_error(prog, "invalid qualifier xfb_stride=%d: must be a "
1770                    "multiple of 4, or a multiple of 8 if it is applied to "
1771                    "a type that is or contains a double.",
1772                    prog->TransformFeedback.BufferStride[idx]);
1773       return false;
1774    }
1775 
1776    if (prog->TransformFeedback.BufferStride[idx] / 4 >
1777        consts->MaxTransformFeedbackInterleavedComponents) {
1778       linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1779                    "limit has been exceeded.");
1780       return false;
1781    }
1782 
1783    return true;
1784 }
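
/* Illustrative GLSL sketch (not from this file), assuming ARB_enhanced_layouts
 * style qualifiers, of strides this helper accepts and rejects:
 *
 *    layout(xfb_buffer = 0, xfb_stride = 32) out;   // OK: multiple of 4
 *    layout(xfb_buffer = 1, xfb_stride = 6)  out;   // rejected above
 */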
1785 
1786 /**
1787  * Check for conflicting xfb_stride default qualifiers and store buffer stride
1788  * for later use.
1789  */
1790 static void
1791 link_xfb_stride_layout_qualifiers(const struct gl_constants *consts,
1792                                   struct gl_shader_program *prog,
1793                                   struct gl_shader **shader_list,
1794                                   unsigned num_shaders)
1795 {
1796    for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
1797       prog->TransformFeedback.BufferStride[i] = 0;
1798    }
1799 
1800    for (unsigned i = 0; i < num_shaders; i++) {
1801       struct gl_shader *shader = shader_list[i];
1802 
1803       for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1804          if (shader->TransformFeedbackBufferStride[j]) {
1805             if (prog->TransformFeedback.BufferStride[j] == 0) {
1806                prog->TransformFeedback.BufferStride[j] =
1807                   shader->TransformFeedbackBufferStride[j];
1808                if (!validate_xfb_buffer_stride(consts, j, prog))
1809                   return;
1810             } else if (prog->TransformFeedback.BufferStride[j] !=
1811                        shader->TransformFeedbackBufferStride[j]){
1812                linker_error(prog,
1813                             "intrastage shaders defined with conflicting "
1814                             "xfb_stride for buffer %d (%d and %d)\n", j,
1815                             prog->TransformFeedback.BufferStride[j],
1816                             shader->TransformFeedbackBufferStride[j]);
1817                return;
1818             }
1819          }
1820       }
1821    }
1822 }
1823 
1824 /**
1825  * Check for conflicting bindless/bound sampler/image layout qualifiers at
1826  * global scope.
1827  */
1828 static void
1829 link_bindless_layout_qualifiers(struct gl_shader_program *prog,
1830                                 struct gl_shader **shader_list,
1831                                 unsigned num_shaders)
1832 {
1833    bool bindless_sampler, bindless_image;
1834    bool bound_sampler, bound_image;
1835 
1836    bindless_sampler = bindless_image = false;
1837    bound_sampler = bound_image = false;
1838 
1839    for (unsigned i = 0; i < num_shaders; i++) {
1840       struct gl_shader *shader = shader_list[i];
1841 
1842       if (shader->bindless_sampler)
1843          bindless_sampler = true;
1844       if (shader->bindless_image)
1845          bindless_image = true;
1846       if (shader->bound_sampler)
1847          bound_sampler = true;
1848       if (shader->bound_image)
1849          bound_image = true;
1850 
1851       if ((bindless_sampler && bound_sampler) ||
1852           (bindless_image && bound_image)) {
1853          /* From section 4.4.6 of the ARB_bindless_texture spec:
1854           *
1855           *     "If both bindless_sampler and bound_sampler, or bindless_image
1856           *      and bound_image, are declared at global scope in any
1857           *      compilation unit, a link-time error will be generated."
1858           */
1859          linker_error(prog, "both bindless_sampler and bound_sampler, or "
1860                       "bindless_image and bound_image, can't be declared at "
1861                       "global scope");
1862       }
1863    }
1864 }
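
/* Illustrative GLSL sketch (not from this file): the conflict detected above
 * arises when compilation units of the same program set opposing defaults at
 * global scope, e.g. one shader containing
 *
 *    layout(bindless_sampler) uniform;
 *
 * while another contains
 *
 *    layout(bound_sampler) uniform;
 */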
1865 
1866 /**
1867  * Checks for conflicting viewport_relative settings across shaders, and sets
1868  * the value for the linked shader.
1869  */
1870 static void
1871 link_layer_viewport_relative_qualifier(struct gl_shader_program *prog,
1872                                        struct gl_program *gl_prog,
1873                                        struct gl_shader **shader_list,
1874                                        unsigned num_shaders)
1875 {
1876    unsigned i;
1877 
1878    /* Find first shader with explicit layer declaration */
1879    for (i = 0; i < num_shaders; i++) {
1880       if (shader_list[i]->redeclares_gl_layer) {
1881          gl_prog->info.layer_viewport_relative =
1882             shader_list[i]->layer_viewport_relative;
1883          break;
1884       }
1885    }
1886 
1887    /* Now make sure that each subsequent shader's explicit layer declaration
1888     * matches the first one's.
1889     */
1890    for (; i < num_shaders; i++) {
1891       if (shader_list[i]->redeclares_gl_layer &&
1892           shader_list[i]->layer_viewport_relative !=
1893           gl_prog->info.layer_viewport_relative) {
1894          linker_error(prog, "all gl_Layer redeclarations must have identical "
1895                       "viewport_relative settings");
1896       }
1897    }
1898 }
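
/* Illustrative sketch (not from this file), assuming the NV_viewport_array2
 * redeclaration syntax: every shader that redeclares gl_Layer must use the
 * same form, e.g.
 *
 *    layout(viewport_relative) out int gl_Layer;
 */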
1899 
1900 /**
1901  * Performs the cross-validation of tessellation control shader vertices and
1902  * layout qualifiers for the attached tessellation control shaders,
1903  * and propagates them to the linked TCS and linked shader program.
1904  */
1905 static void
1906 link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
1907                                struct gl_program *gl_prog,
1908                                struct gl_shader **shader_list,
1909                                unsigned num_shaders)
1910 {
1911    if (gl_prog->info.stage != MESA_SHADER_TESS_CTRL)
1912       return;
1913 
1914    gl_prog->info.tess.tcs_vertices_out = 0;
1915 
1916    /* From the GLSL 4.0 spec (chapter 4.3.8.2):
1917     *
1918     *     "All tessellation control shader layout declarations in a program
1919     *      must specify the same output patch vertex count.  There must be at
1920     *      least one layout qualifier specifying an output patch vertex count
1921     *      in any program containing tessellation control shaders; however,
1922     *      such a declaration is not required in all tessellation control
1923     *      shaders."
1924     */
1925 
1926    for (unsigned i = 0; i < num_shaders; i++) {
1927       struct gl_shader *shader = shader_list[i];
1928 
1929       if (shader->info.TessCtrl.VerticesOut != 0) {
1930          if (gl_prog->info.tess.tcs_vertices_out != 0 &&
1931              gl_prog->info.tess.tcs_vertices_out !=
1932              (unsigned) shader->info.TessCtrl.VerticesOut) {
1933             linker_error(prog, "tessellation control shader defined with "
1934                          "conflicting output vertex count (%d and %d)\n",
1935                          gl_prog->info.tess.tcs_vertices_out,
1936                          shader->info.TessCtrl.VerticesOut);
1937             return;
1938          }
1939          gl_prog->info.tess.tcs_vertices_out =
1940             shader->info.TessCtrl.VerticesOut;
1941       }
1942    }
1943 
1944    /* Just do the intrastage -> interstage propagation right now,
1945     * since we already know we're in the right type of shader program
1946     * for doing it.
1947     */
1948    if (gl_prog->info.tess.tcs_vertices_out == 0) {
1949       linker_error(prog, "tessellation control shader didn't declare "
1950                    "vertices out layout qualifier\n");
1951       return;
1952    }
1953 }
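
/* Illustrative GLSL sketch (not from this file): at least one attached
 * tessellation control shader must declare the output patch size, e.g.
 *
 *    layout(vertices = 3) out;
 *
 * and any other TCS in the program that also declares it must use the same
 * value, or the conflict error above is raised.
 */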
1954 
1955 
1956 /**
1957  * Performs the cross-validation of tessellation evaluation shader
1958  * primitive type, vertex spacing, ordering and point_mode layout qualifiers
1959  * for the attached tessellation evaluation shaders, and propagates them
1960  * to the linked TES and linked shader program.
1961  */
1962 static void
1963 link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
1964                               struct gl_program *gl_prog,
1965                               struct gl_shader **shader_list,
1966                               unsigned num_shaders)
1967 {
1968    if (gl_prog->info.stage != MESA_SHADER_TESS_EVAL)
1969       return;
1970 
1971    int point_mode = -1;
1972    unsigned vertex_order = 0;
1973 
1974    gl_prog->info.tess._primitive_mode = TESS_PRIMITIVE_UNSPECIFIED;
1975    gl_prog->info.tess.spacing = TESS_SPACING_UNSPECIFIED;
1976 
1977    /* From the GLSL 4.0 spec (chapter 4.3.8.1):
1978     *
1979     *     "At least one tessellation evaluation shader (compilation unit) in
1980     *      a program must declare a primitive mode in its input layout.
1981     *      Declaring vertex spacing, ordering, and point mode identifiers is
1982     *      optional.  It is not required that all tessellation evaluation
1983     *      shaders in a program declare a primitive mode.  If spacing or
1984     *      vertex ordering declarations are omitted, the tessellation
1985     *      primitive generator will use equal spacing or counter-clockwise
1986     *      vertex ordering, respectively.  If a point mode declaration is
1987     *      omitted, the tessellation primitive generator will produce lines or
1988     *      triangles according to the primitive mode."
1989     */
1990 
1991    for (unsigned i = 0; i < num_shaders; i++) {
1992       struct gl_shader *shader = shader_list[i];
1993 
1994       if (shader->info.TessEval._PrimitiveMode != TESS_PRIMITIVE_UNSPECIFIED) {
1995          if (gl_prog->info.tess._primitive_mode != TESS_PRIMITIVE_UNSPECIFIED &&
1996              gl_prog->info.tess._primitive_mode !=
1997              shader->info.TessEval._PrimitiveMode) {
1998             linker_error(prog, "tessellation evaluation shader defined with "
1999                          "conflicting input primitive modes.\n");
2000             return;
2001          }
2002          gl_prog->info.tess._primitive_mode =
2003             shader->info.TessEval._PrimitiveMode;
2004       }
2005 
2006       if (shader->info.TessEval.Spacing != 0) {
2007          if (gl_prog->info.tess.spacing != 0 && gl_prog->info.tess.spacing !=
2008              shader->info.TessEval.Spacing) {
2009             linker_error(prog, "tessellation evaluation shader defined with "
2010                          "conflicting vertex spacing.\n");
2011             return;
2012          }
2013          gl_prog->info.tess.spacing = shader->info.TessEval.Spacing;
2014       }
2015 
2016       if (shader->info.TessEval.VertexOrder != 0) {
2017          if (vertex_order != 0 &&
2018              vertex_order != shader->info.TessEval.VertexOrder) {
2019             linker_error(prog, "tessellation evaluation shader defined with "
2020                          "conflicting ordering.\n");
2021             return;
2022          }
2023          vertex_order = shader->info.TessEval.VertexOrder;
2024       }
2025 
2026       if (shader->info.TessEval.PointMode != -1) {
2027          if (point_mode != -1 &&
2028              point_mode != shader->info.TessEval.PointMode) {
2029             linker_error(prog, "tessellation evaluation shader defined with "
2030                          "conflicting point modes.\n");
2031             return;
2032          }
2033          point_mode = shader->info.TessEval.PointMode;
2034       }
2035 
2036    }
2037 
2038    /* Just do the intrastage -> interstage propagation right now,
2039     * since we already know we're in the right type of shader program
2040     * for doing it.
2041     */
2042    if (gl_prog->info.tess._primitive_mode == TESS_PRIMITIVE_UNSPECIFIED) {
2043       linker_error(prog,
2044                    "tessellation evaluation shader didn't declare an input "
2045                    "primitive mode.\n");
2046       return;
2047    }
2048 
2049    if (gl_prog->info.tess.spacing == TESS_SPACING_UNSPECIFIED)
2050       gl_prog->info.tess.spacing = TESS_SPACING_EQUAL;
2051 
2052    if (vertex_order == 0 || vertex_order == GL_CCW)
2053       gl_prog->info.tess.ccw = true;
2054    else
2055       gl_prog->info.tess.ccw = false;
2056 
2057 
2058    if (point_mode == -1 || point_mode == GL_FALSE)
2059       gl_prog->info.tess.point_mode = false;
2060    else
2061       gl_prog->info.tess.point_mode = true;
2062 }
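
/* Illustrative GLSL sketch (not from this file): a tessellation evaluation
 * input layout covering the qualifiers merged above; omitted spacing and
 * ordering fall back to equal_spacing and ccw as the spec quote describes.
 *
 *    layout(triangles, fractional_odd_spacing, cw, point_mode) in;
 */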
2063 
2064 
2065 /**
2066  * Performs the cross-validation of layout qualifiers specified in
2067  * redeclaration of gl_FragCoord for the attached fragment shaders,
2068  * and propagates them to the linked FS and linked shader program.
2069  */
2070 static void
2071 link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
2072                                 struct gl_linked_shader *linked_shader,
2073                                 struct gl_shader **shader_list,
2074                                 unsigned num_shaders)
2075 {
2076    bool redeclares_gl_fragcoord = false;
2077    bool uses_gl_fragcoord = false;
2078    bool origin_upper_left = false;
2079    bool pixel_center_integer = false;
2080 
2081    if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
2082        (prog->data->Version < 150 &&
2083         !prog->ARB_fragment_coord_conventions_enable))
2084       return;
2085 
2086    for (unsigned i = 0; i < num_shaders; i++) {
2087       struct gl_shader *shader = shader_list[i];
2088       /* From the GLSL 1.50 spec, page 39:
2089        *
2090        *   "If gl_FragCoord is redeclared in any fragment shader in a program,
2091        *    it must be redeclared in all the fragment shaders in that program
2092        *    that have a static use of gl_FragCoord."
2093        */
2094       if ((redeclares_gl_fragcoord && !shader->redeclares_gl_fragcoord &&
2095            shader->uses_gl_fragcoord)
2096           || (shader->redeclares_gl_fragcoord && !redeclares_gl_fragcoord &&
2097               uses_gl_fragcoord)) {
2098          linker_error(prog, "fragment shader defined with conflicting "
2099                       "layout qualifiers for gl_FragCoord\n");
2100       }
2101 
2102       /* From the GLSL 1.50 spec, page 39:
2103        *
2104        *   "All redeclarations of gl_FragCoord in all fragment shaders in a
2105        *    single program must have the same set of qualifiers."
2106        */
2107       if (redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord &&
2108           (shader->origin_upper_left != origin_upper_left ||
2109            shader->pixel_center_integer != pixel_center_integer)) {
2110          linker_error(prog, "fragment shader defined with conflicting "
2111                       "layout qualifiers for gl_FragCoord\n");
2112       }
2113 
2114       /* Update the linked shader state.  Note that uses_gl_fragcoord should
2115        * accumulate the results.  The other values should replace.  If there
2116        * are multiple redeclarations, all the fields except uses_gl_fragcoord
2117        * are already known to be the same.
2118        */
2119       if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
2120          redeclares_gl_fragcoord = shader->redeclares_gl_fragcoord;
2121          uses_gl_fragcoord |= shader->uses_gl_fragcoord;
2122          origin_upper_left = shader->origin_upper_left;
2123          pixel_center_integer = shader->pixel_center_integer;
2124       }
2125 
2126       linked_shader->Program->info.fs.early_fragment_tests |=
2127          shader->EarlyFragmentTests || shader->PostDepthCoverage;
2128       linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
2129       linked_shader->Program->info.fs.post_depth_coverage |=
2130          shader->PostDepthCoverage;
2131       linked_shader->Program->info.fs.pixel_interlock_ordered |=
2132          shader->PixelInterlockOrdered;
2133       linked_shader->Program->info.fs.pixel_interlock_unordered |=
2134          shader->PixelInterlockUnordered;
2135       linked_shader->Program->info.fs.sample_interlock_ordered |=
2136          shader->SampleInterlockOrdered;
2137       linked_shader->Program->info.fs.sample_interlock_unordered |=
2138          shader->SampleInterlockUnordered;
2139       linked_shader->Program->info.fs.advanced_blend_modes |= shader->BlendSupport;
2140    }
2141 
2142    linked_shader->Program->info.fs.pixel_center_integer = pixel_center_integer;
2143    linked_shader->Program->info.fs.origin_upper_left = origin_upper_left;
2144 }
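
/* Illustrative GLSL sketch (not from this file): a gl_FragCoord redeclaration
 * whose qualifiers must match across all fragment shaders in the program, as
 * enforced above.
 *
 *    layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;
 */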
2145 
2146 /**
2147  * Performs the cross-validation of geometry shader max_vertices and
2148  * primitive type layout qualifiers for the attached geometry shaders,
2149  * and propagates them to the linked GS and linked shader program.
2150  */
2151 static void
2152 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
2153                                 struct gl_program *gl_prog,
2154                                 struct gl_shader **shader_list,
2155                                 unsigned num_shaders)
2156 {
2157    /* No in/out qualifiers defined for anything but GLSL 1.50+
2158     * geometry shaders so far.
2159     */
2160    if (gl_prog->info.stage != MESA_SHADER_GEOMETRY ||
2161        prog->data->Version < 150)
2162       return;
2163 
2164    int vertices_out = -1;
2165 
2166    gl_prog->info.gs.invocations = 0;
2167    gl_prog->info.gs.input_primitive = SHADER_PRIM_UNKNOWN;
2168    gl_prog->info.gs.output_primitive = SHADER_PRIM_UNKNOWN;
2169 
2170    /* From the GLSL 1.50 spec, page 46:
2171     *
2172     *     "All geometry shader output layout declarations in a program
2173     *      must declare the same layout and same value for
2174     *      max_vertices. There must be at least one geometry output
2175     *      layout declaration somewhere in a program, but not all
2176     *      geometry shaders (compilation units) are required to
2177     *      declare it."
2178     */
2179 
2180    for (unsigned i = 0; i < num_shaders; i++) {
2181       struct gl_shader *shader = shader_list[i];
2182 
2183       if (shader->info.Geom.InputType != SHADER_PRIM_UNKNOWN) {
2184          if (gl_prog->info.gs.input_primitive != SHADER_PRIM_UNKNOWN &&
2185              gl_prog->info.gs.input_primitive !=
2186              shader->info.Geom.InputType) {
2187             linker_error(prog, "geometry shader defined with conflicting "
2188                          "input types\n");
2189             return;
2190          }
2191          gl_prog->info.gs.input_primitive = (enum shader_prim)shader->info.Geom.InputType;
2192       }
2193 
2194       if (shader->info.Geom.OutputType != SHADER_PRIM_UNKNOWN) {
2195          if (gl_prog->info.gs.output_primitive != SHADER_PRIM_UNKNOWN &&
2196              gl_prog->info.gs.output_primitive !=
2197              shader->info.Geom.OutputType) {
2198             linker_error(prog, "geometry shader defined with conflicting "
2199                          "output types\n");
2200             return;
2201          }
2202          gl_prog->info.gs.output_primitive = (enum shader_prim)shader->info.Geom.OutputType;
2203       }
2204 
2205       if (shader->info.Geom.VerticesOut != -1) {
2206          if (vertices_out != -1 &&
2207              vertices_out != shader->info.Geom.VerticesOut) {
2208             linker_error(prog, "geometry shader defined with conflicting "
2209                          "output vertex count (%d and %d)\n",
2210                          vertices_out, shader->info.Geom.VerticesOut);
2211             return;
2212          }
2213          vertices_out = shader->info.Geom.VerticesOut;
2214       }
2215 
2216       if (shader->info.Geom.Invocations != 0) {
2217          if (gl_prog->info.gs.invocations != 0 &&
2218              gl_prog->info.gs.invocations !=
2219              (unsigned) shader->info.Geom.Invocations) {
2220             linker_error(prog, "geometry shader defined with conflicting "
2221                          "invocation count (%d and %d)\n",
2222                          gl_prog->info.gs.invocations,
2223                          shader->info.Geom.Invocations);
2224             return;
2225          }
2226          gl_prog->info.gs.invocations = shader->info.Geom.Invocations;
2227       }
2228    }
2229 
2230    /* Just do the intrastage -> interstage propagation right now,
2231     * since we already know we're in the right type of shader program
2232     * for doing it.
2233     */
2234    if (gl_prog->info.gs.input_primitive == SHADER_PRIM_UNKNOWN) {
2235       linker_error(prog,
2236                    "geometry shader didn't declare primitive input type\n");
2237       return;
2238    }
2239 
2240    if (gl_prog->info.gs.output_primitive == SHADER_PRIM_UNKNOWN) {
2241       linker_error(prog,
2242                    "geometry shader didn't declare primitive output type\n");
2243       return;
2244    }
2245 
2246    if (vertices_out == -1) {
2247       linker_error(prog,
2248                    "geometry shader didn't declare max_vertices\n");
2249       return;
2250    } else {
2251       gl_prog->info.gs.vertices_out = vertices_out;
2252    }
2253 
2254    if (gl_prog->info.gs.invocations == 0)
2255       gl_prog->info.gs.invocations = 1;
2256 }
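
/* Illustrative GLSL sketch (not from this file): geometry shader layout
 * declarations covering the values merged above (input type, invocations,
 * output type and max_vertices).
 *
 *    layout(triangles, invocations = 2) in;
 *    layout(triangle_strip, max_vertices = 6) out;
 */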
2257 
2258 
2259 /**
2260  * Perform cross-validation of compute shader local_size_{x,y,z} layout and
2261  * derivative arrangement qualifiers for the attached compute shaders, and
2262  * propagate them to the linked CS and linked shader program.
2263  */
2264 static void
2265 link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
2266                                 struct gl_program *gl_prog,
2267                                 struct gl_shader **shader_list,
2268                                 unsigned num_shaders)
2269 {
2270    /* This function is called for all shader stages, but it only has an effect
2271     * for compute shaders.
2272     */
2273    if (gl_prog->info.stage != MESA_SHADER_COMPUTE)
2274       return;
2275 
2276    for (int i = 0; i < 3; i++)
2277       gl_prog->info.workgroup_size[i] = 0;
2278 
2279    gl_prog->info.workgroup_size_variable = false;
2280 
2281    gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
2282 
2283    /* From the ARB_compute_shader spec, in the section describing local size
2284     * declarations:
2285     *
2286     *     If multiple compute shaders attached to a single program object
2287     *     declare local work-group size, the declarations must be identical;
2288     *     otherwise a link-time error results. Furthermore, if a program
2289     *     object contains any compute shaders, at least one must contain an
2290     *     input layout qualifier specifying the local work sizes of the
2291     *     program, or a link-time error will occur.
2292     */
2293    for (unsigned sh = 0; sh < num_shaders; sh++) {
2294       struct gl_shader *shader = shader_list[sh];
2295 
2296       if (shader->info.Comp.LocalSize[0] != 0) {
2297          if (gl_prog->info.workgroup_size[0] != 0) {
2298             for (int i = 0; i < 3; i++) {
2299                if (gl_prog->info.workgroup_size[i] !=
2300                    shader->info.Comp.LocalSize[i]) {
2301                   linker_error(prog, "compute shader defined with conflicting "
2302                                "local sizes\n");
2303                   return;
2304                }
2305             }
2306          }
2307          for (int i = 0; i < 3; i++) {
2308             gl_prog->info.workgroup_size[i] =
2309                shader->info.Comp.LocalSize[i];
2310          }
2311       } else if (shader->info.Comp.LocalSizeVariable) {
2312          if (gl_prog->info.workgroup_size[0] != 0) {
2313             /* The ARB_compute_variable_group_size spec says:
2314              *
2315              *     If one compute shader attached to a program declares a
2316              *     variable local group size and a second compute shader
2317              *     attached to the same program declares a fixed local group
2318              *     size, a link-time error results.
2319              */
2320             linker_error(prog, "compute shader defined with both fixed and "
2321                          "variable local group size\n");
2322             return;
2323          }
2324          gl_prog->info.workgroup_size_variable = true;
2325       }
2326 
2327       enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
2328       if (group != DERIVATIVE_GROUP_NONE) {
2329          if (gl_prog->info.cs.derivative_group != DERIVATIVE_GROUP_NONE &&
2330              gl_prog->info.cs.derivative_group != group) {
2331             linker_error(prog, "compute shader defined with conflicting "
2332                          "derivative groups\n");
2333             return;
2334          }
2335          gl_prog->info.cs.derivative_group = group;
2336       }
2337    }
2338 
2339    /* Just do the intrastage -> interstage propagation right now,
2340     * since we already know we're in the right type of shader program
2341     * for doing it.
2342     */
2343    if (gl_prog->info.workgroup_size[0] == 0 &&
2344        !gl_prog->info.workgroup_size_variable) {
2345       linker_error(prog, "compute shader must contain a fixed or a variable "
2346                          "local group size\n");
2347       return;
2348    }
2349 
2350    if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
2351       if (gl_prog->info.workgroup_size[0] % 2 != 0) {
2352          linker_error(prog, "derivative_group_quadsNV must be used with a "
2353                       "local group size whose first dimension "
2354                       "is a multiple of 2\n");
2355          return;
2356       }
2357       if (gl_prog->info.workgroup_size[1] % 2 != 0) {
2358          linker_error(prog, "derivative_group_quadsNV must be used with a "
2359                       "local group size whose second dimension "
2360                       "is a multiple of 2\n");
2361          return;
2362       }
2363    } else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
2364       if ((gl_prog->info.workgroup_size[0] *
2365            gl_prog->info.workgroup_size[1] *
2366            gl_prog->info.workgroup_size[2]) % 4 != 0) {
2367          linker_error(prog, "derivative_group_linearNV must be used with a "
2368                       "local group size whose total number of invocations "
2369                       "is a multiple of 4\n");
2370          return;
2371       }
2372    }
2373 }
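
/* Illustrative GLSL sketch (not from this file): a fixed local size that
 * satisfies the derivative_group_quadsNV requirement checked above (the
 * first two dimensions are multiples of 2).
 *
 *    layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
 *    layout(derivative_group_quadsNV) in;
 */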
2374 
2375 /**
2376  * Link all out variables of a single stage that are declared in shaders
2377  * without a main function and are not already present in the linked shader.
2378  */
2379 static void
2380 link_output_variables(struct gl_linked_shader *linked_shader,
2381                       struct gl_shader **shader_list,
2382                       unsigned num_shaders)
2383 {
2384    struct glsl_symbol_table *symbols = linked_shader->symbols;
2385 
2386    for (unsigned i = 0; i < num_shaders; i++) {
2387 
2388       /* Skip shader object with main function */
2389       if (shader_list[i]->symbols->get_function("main"))
2390          continue;
2391 
2392       foreach_in_list(ir_instruction, ir, shader_list[i]->ir) {
2393          if (ir->ir_type != ir_type_variable)
2394             continue;
2395 
2396          ir_variable *var = (ir_variable *) ir;
2397 
2398          if (var->data.mode == ir_var_shader_out &&
2399                !symbols->get_variable(var->name)) {
2400             var = var->clone(linked_shader, NULL);
2401             symbols->add_variable(var);
2402             linked_shader->ir->push_head(var);
2403          }
2404       }
2405    }
2406 
2407    return;
2408 }
2409 
2410 
2411 /**
2412  * Combine a group of shaders for a single stage to generate a linked shader
2413  *
2414  * \note
2415  * If this function is supplied a single shader, it is cloned, and the new
2416  * shader is returned.
2417  */
2418 struct gl_linked_shader *
2419 link_intrastage_shaders(void *mem_ctx,
2420                         struct gl_context *ctx,
2421                         struct gl_shader_program *prog,
2422                         struct gl_shader **shader_list,
2423                         unsigned num_shaders,
2424                         bool allow_missing_main)
2425 {
2426    struct gl_uniform_block *ubo_blocks = NULL;
2427    struct gl_uniform_block *ssbo_blocks = NULL;
2428    unsigned num_ubo_blocks = 0;
2429    unsigned num_ssbo_blocks = 0;
2430 
2431    /* Check that global variables defined in multiple shaders are consistent.
2432     */
2433    glsl_symbol_table variables;
2434    for (unsigned i = 0; i < num_shaders; i++) {
2435       if (shader_list[i] == NULL)
2436          continue;
2437       cross_validate_globals(&ctx->Const, prog, shader_list[i]->ir, &variables,
2438                              false);
2439    }
2440 
2441    if (!prog->data->LinkStatus)
2442       return NULL;
2443 
2444    /* Check that interface blocks defined in multiple shaders are consistent.
2445     */
2446    validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
2447                                         num_shaders);
2448    if (!prog->data->LinkStatus)
2449       return NULL;
2450 
2451    /* Check that there is only a single definition of each function signature
2452     * across all shaders.
2453     */
2454    for (unsigned i = 0; i < (num_shaders - 1); i++) {
2455       foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
2456          ir_function *const f = node->as_function();
2457 
2458          if (f == NULL)
2459             continue;
2460 
2461          for (unsigned j = i + 1; j < num_shaders; j++) {
2462             ir_function *const other =
2463                shader_list[j]->symbols->get_function(f->name);
2464 
2465             /* If the other shader has no function (and therefore no function
2466              * signatures) with the same name, skip to the next shader.
2467              */
2468             if (other == NULL)
2469                continue;
2470 
2471             foreach_in_list(ir_function_signature, sig, &f->signatures) {
2472                if (!sig->is_defined)
2473                   continue;
2474 
2475                ir_function_signature *other_sig =
2476                   other->exact_matching_signature(NULL, &sig->parameters);
2477 
2478                if (other_sig != NULL && other_sig->is_defined) {
2479                   linker_error(prog, "function `%s' is multiply defined\n",
2480                                f->name);
2481                   return NULL;
2482                }
2483             }
2484          }
2485       }
2486    }
2487 
2488    /* Find the shader that defines main, and make a clone of it.
2489     *
2490     * Starting with the clone, search for undefined references.  If one is
2491     * found, find the shader that defines it.  Clone the reference and add
2492     * it to the shader.  Repeat until there are no undefined references or
2493     * until a reference cannot be resolved.
2494     */
2495    gl_shader *main = NULL;
2496    for (unsigned i = 0; i < num_shaders; i++) {
2497       if (_mesa_get_main_function_signature(shader_list[i]->symbols)) {
2498          main = shader_list[i];
2499          break;
2500       }
2501    }
2502 
2503    if (main == NULL && allow_missing_main)
2504       main = shader_list[0];
2505 
2506    if (main == NULL) {
2507       linker_error(prog, "%s shader lacks `main'\n",
2508                    _mesa_shader_stage_to_string(shader_list[0]->Stage));
2509       return NULL;
2510    }
2511 
2512    gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
2513    linked->Stage = shader_list[0]->Stage;
2514 
2515    /* Create program and attach it to the linked shader */
2516    struct gl_program *gl_prog =
2517       ctx->Driver.NewProgram(ctx, shader_list[0]->Stage, prog->Name, false);
2518    if (!gl_prog) {
2519       prog->data->LinkStatus = LINKING_FAILURE;
2520       _mesa_delete_linked_shader(ctx, linked);
2521       return NULL;
2522    }
2523 
2524    _mesa_reference_shader_program_data(&gl_prog->sh.data, prog->data);
2525 
2526    /* Don't use _mesa_reference_program(); just take ownership. */
2527    linked->Program = gl_prog;
2528 
2529    linked->ir = new(linked) exec_list;
2530    clone_ir_list(mem_ctx, linked->ir, main->ir);
2531 
2532    link_fs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
2533    link_tcs_out_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2534    link_tes_in_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2535    link_gs_inout_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2536    link_cs_input_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2537 
2538    if (linked->Stage != MESA_SHADER_FRAGMENT)
2539       link_xfb_stride_layout_qualifiers(&ctx->Const, prog, shader_list, num_shaders);
2540 
2541    link_bindless_layout_qualifiers(prog, shader_list, num_shaders);
2542 
2543    link_layer_viewport_relative_qualifier(prog, gl_prog, shader_list, num_shaders);
2544 
2545    populate_symbol_table(linked, shader_list[0]->symbols);
2546 
2547    /* The pointer to the main function in the final linked shader (i.e., the
2548     * copy of the original shader that contained the main function).
2549     */
2550    ir_function_signature *const main_sig =
2551       _mesa_get_main_function_signature(linked->symbols);
2552 
2553    /* Move any instructions other than variable declarations or function
2554     * declarations into main.
2555     */
2556    if (main_sig != NULL) {
2557       exec_node *insertion_point =
2558          move_non_declarations(linked->ir, &main_sig->body.head_sentinel, false,
2559                                linked);
2560 
2561       for (unsigned i = 0; i < num_shaders; i++) {
2562          if (shader_list[i] == main)
2563             continue;
2564 
2565          insertion_point = move_non_declarations(shader_list[i]->ir,
2566                                                  insertion_point, true, linked);
2567       }
2568    }
2569 
2570    if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
2571       _mesa_delete_linked_shader(ctx, linked);
2572       return NULL;
2573    }
2574 
2575    if (linked->Stage != MESA_SHADER_FRAGMENT)
2576       link_output_variables(linked, shader_list, num_shaders);
2577 
2578    /* Make a pass over all variable declarations to ensure that arrays with
2579     * unspecified sizes have a size specified.  The size is inferred from the
2580     * max_array_access field.
2581     */
2582    array_sizing_visitor v;
2583    v.run(linked->ir);
2584    v.fixup_unnamed_interface_types();
2585 
2586    /* Now that we know the sizes of all the arrays, we can replace .length()
2587     * calls with a constant expression.
2588     */
2589    array_length_to_const_visitor len_v;
2590    len_v.run(linked->ir);
2591 
2592    /* Link up uniform blocks defined within this stage. */
2593    link_uniform_blocks(mem_ctx, &ctx->Const, prog, linked, &ubo_blocks,
2594                        &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
2595 
2596    const unsigned max_uniform_blocks =
2597       ctx->Const.Program[linked->Stage].MaxUniformBlocks;
2598    if (num_ubo_blocks > max_uniform_blocks) {
2599       linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
2600                    _mesa_shader_stage_to_string(linked->Stage),
2601                    num_ubo_blocks, max_uniform_blocks);
2602    }
2603 
2604    const unsigned max_shader_storage_blocks =
2605       ctx->Const.Program[linked->Stage].MaxShaderStorageBlocks;
2606    if (num_ssbo_blocks > max_shader_storage_blocks) {
2607       linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
2608                    _mesa_shader_stage_to_string(linked->Stage),
2609                    num_ssbo_blocks, max_shader_storage_blocks);
2610    }
2611 
2612    if (!prog->data->LinkStatus) {
2613       _mesa_delete_linked_shader(ctx, linked);
2614       return NULL;
2615    }
2616 
2617    /* Copy ubo blocks to linked shader list */
2618    linked->Program->sh.UniformBlocks =
2619       ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
2620    ralloc_steal(linked, ubo_blocks);
2621    for (unsigned i = 0; i < num_ubo_blocks; i++) {
2622       linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
2623    }
2624    linked->Program->sh.NumUniformBlocks = num_ubo_blocks;
2625    linked->Program->info.num_ubos = num_ubo_blocks;
2626 
2627    /* Copy ssbo blocks to linked shader list */
2628    linked->Program->sh.ShaderStorageBlocks =
2629       ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
2630    ralloc_steal(linked, ssbo_blocks);
2631    for (unsigned i = 0; i < num_ssbo_blocks; i++) {
2632       linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
2633    }
2634    linked->Program->info.num_ssbos = num_ssbo_blocks;
2635 
2636    /* At this point linked should contain all of the linked IR, so
2637     * validate it to make sure nothing went wrong.
2638     */
2639    validate_ir_tree(linked->ir);
2640 
2641    /* Set the size of geometry shader input arrays */
2642    if (linked->Stage == MESA_SHADER_GEOMETRY) {
2643       unsigned num_vertices =
2644          vertices_per_prim(gl_prog->info.gs.input_primitive);
2645       array_resize_visitor input_resize_visitor(num_vertices, prog,
2646                                                 MESA_SHADER_GEOMETRY);
2647       foreach_in_list(ir_instruction, ir, linked->ir) {
2648          ir->accept(&input_resize_visitor);
2649       }
2650    }
2651 
2652    if (ctx->Const.VertexID_is_zero_based)
2653       lower_vertex_id(linked);
2654 
2655    if (ctx->Const.LowerCsDerivedVariables)
2656       lower_cs_derived(linked);
2657 
2658    /* Set the linked source SHA1. */
2659    if (num_shaders == 1) {
2660       memcpy(linked->linked_source_sha1, shader_list[0]->compiled_source_sha1,
2661              SHA1_DIGEST_LENGTH);
2662    } else {
2663       struct mesa_sha1 sha1_ctx;
2664       _mesa_sha1_init(&sha1_ctx);
2665 
2666       for (unsigned i = 0; i < num_shaders; i++) {
2667          if (shader_list[i] == NULL)
2668             continue;
2669 
2670          _mesa_sha1_update(&sha1_ctx, shader_list[i]->compiled_source_sha1,
2671                            SHA1_DIGEST_LENGTH);
2672       }
2673       _mesa_sha1_final(&sha1_ctx, linked->linked_source_sha1);
2674    }
2675 
2676    return linked;
2677 }
2678 
2679 /**
2680  * Update the sizes of linked shader uniform arrays to the maximum
2681  * array index used.
2682  *
2683  * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
2684  *
2685  *     If one or more elements of an array are active,
2686  *     GetActiveUniform will return the name of the array in name,
2687  *     subject to the restrictions listed above. The type of the array
2688  *     is returned in type. The size parameter contains the highest
2689  *     array element index used, plus one. The compiler or linker
2690  *     determines the highest index used.  There will be only one
2691  *     active uniform reported by the GL per uniform array.
2692  *
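 * As an illustrative sketch (the declaration is hypothetical, not from this
 * file): given "uniform vec4 colors[8];" where the highest element statically
 * accessed in any linked stage is colors[2], this pass shrinks the type to
 * vec4[3], and GetActiveUniform subsequently reports a size of 3.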
2693  */
2694 static void
2695 update_array_sizes(struct gl_shader_program *prog)
2696 {
2697    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
2698       if (prog->_LinkedShaders[i] == NULL)
2699          continue;
2700 
2701       bool types_were_updated = false;
2702 
2703       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
2704          ir_variable *const var = node->as_variable();
2705 
2706          if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
2707              !var->type->is_array())
2708             continue;
2709 
2710          /* GL_ARB_uniform_buffer_object says that std140 uniforms
2711           * will not be eliminated.  Since we always do std140, just
2712           * don't resize arrays in UBOs.
2713           *
2714           * Atomic counters are supposed to get deterministic
2715           * locations assigned based on the declaration ordering and
2716           * sizes; array compaction would mess that up.
2717           *
2718           * Subroutine uniforms are not removed.
2719           */
2720          if (var->is_in_buffer_block() || var->type->contains_atomic() ||
2721              var->type->contains_subroutine() || var->constant_initializer)
2722             continue;
2723 
2724          int size = var->data.max_array_access;
2725          for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
2726             if (prog->_LinkedShaders[j] == NULL)
2727                continue;
2728 
2729             foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) {
2730                ir_variable *other_var = node2->as_variable();
2731                if (!other_var)
2732                   continue;
2733 
2734                if (strcmp(var->name, other_var->name) == 0 &&
2735                    other_var->data.max_array_access > size) {
2736                   size = other_var->data.max_array_access;
2737                }
2738             }
2739          }
2740 
2741          if (size + 1 != (int)var->type->length) {
2742             /* If this is a built-in uniform (i.e., it's backed by some
2743              * fixed-function state), adjust the number of state slots to
2744              * match the new array size.  The number of slots per array entry
2745              * is not known.  It seems safe to assume that the total number of
2746              * slots is an integer multiple of the number of array elements.
2747              * Determine the number of slots per array element by dividing by
2748              * the old (total) size.
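             *
             * As a hypothetical illustration: a built-in uniform array backed
             * by 8 state slots with 4 array elements implies 2 slots per
             * element, so resizing the array to 2 elements leaves 2 * 2 = 4
             * state slots.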
2749              */
2750             const unsigned num_slots = var->get_num_state_slots();
2751             if (num_slots > 0) {
2752                var->set_num_state_slots((size + 1)
2753                                         * (num_slots / var->type->length));
2754             }
2755 
2756             var->type = glsl_type::get_array_instance(var->type->fields.array,
2757                                                       size + 1);
2758             types_were_updated = true;
2759          }
2760       }
2761 
2762       /* Update the types of dereferences in case we changed any. */
2763       if (types_were_updated) {
2764          deref_type_updater v;
2765          v.run(prog->_LinkedShaders[i]->ir);
2766       }
2767    }
2768 }
2769 
2770 /**
2771  * Resize tessellation evaluation per-vertex inputs to the size of
2772  * tessellation control per-vertex outputs.
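 *
 * For example (hypothetical shaders, not from this file): a TCS declaring
 * "layout(vertices = 3) out;" causes unsized TES per-vertex input arrays,
 * such as gl_in[] or a user-declared "in vec4 foo[];", to be resized to 3.
 * With no TCS present they remain sized to gl_MaxPatchVertices.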
2773  */
2774 static void
2775 resize_tes_inputs(const struct gl_constants *consts,
2776                   struct gl_shader_program *prog)
2777 {
2778    if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
2779       return;
2780 
2781    gl_linked_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
2782    gl_linked_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
2783 
2784    /* If no control shader is present, then the TES inputs are statically
2785     * sized to MaxPatchVertices; the actual size of the arrays won't be
2786     * known until draw time.
2787     */
2788    const int num_vertices = tcs
2789       ? tcs->Program->info.tess.tcs_vertices_out
2790       : consts->MaxPatchVertices;
2791 
2792    array_resize_visitor input_resize_visitor(num_vertices, prog,
2793                                              MESA_SHADER_TESS_EVAL);
2794    foreach_in_list(ir_instruction, ir, tes->ir) {
2795       ir->accept(&input_resize_visitor);
2796    }
2797 
2798    if (tcs) {
2799       /* Convert the gl_PatchVerticesIn system value into a constant, since
2800        * the value is known at this point.
2801        */
2802       foreach_in_list(ir_instruction, ir, tes->ir) {
2803          ir_variable *var = ir->as_variable();
2804          if (var && var->data.mode == ir_var_system_value &&
2805              var->data.location == SYSTEM_VALUE_VERTICES_IN) {
2806             void *mem_ctx = ralloc_parent(var);
2807             var->data.location = 0;
2808             var->data.explicit_location = false;
2809             var->data.mode = ir_var_auto;
2810             var->constant_value = new(mem_ctx) ir_constant(num_vertices);
2811          }
2812       }
2813    }
2814 }
2815 
2816 /**
2817  * Find a contiguous set of available bits in a bitmask.
2818  *
2819  * \param used_mask     Bits representing used (1) and unused (0) locations
2820  * \param needed_count  Number of contiguous bits needed.
2821  *
2822  * \return
2823  * Base location of the available bits on success or -1 on failure.
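 *
 * Worked example (values chosen purely for illustration): with
 * used_mask = 0b0111 and needed_count = 2, the first position with two
 * contiguous clear bits is bit 3, so the function returns 3.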
2824  */
2825 static int
2826 find_available_slots(unsigned used_mask, unsigned needed_count)
2827 {
2828    unsigned needed_mask = (1 << needed_count) - 1;
2829    const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
2830 
2831    /* The comparison to 32 is redundant, but without it GCC emits "warning:
2832     * cannot optimize possibly infinite loops" for the loop below.
2833     */
2834    if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
2835       return -1;
2836 
2837    for (int i = 0; i <= max_bit_to_test; i++) {
2838       if ((needed_mask & ~used_mask) == needed_mask)
2839          return i;
2840 
2841       needed_mask <<= 1;
2842    }
2843 
2844    return -1;
2845 }
2846 
2847 
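/* Build a mask of the lowest 'i' bits without hitting undefined behavior when
 * 'i' is 32 (shifting a 32-bit value by 32 or more is undefined in C).  As an
 * illustration, SAFE_MASK_FROM_INDEX(4) == 0xf and
 * SAFE_MASK_FROM_INDEX(32) == ~0.
 */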
2848 #define SAFE_MASK_FROM_INDEX(i) (((i) >= 32) ? ~0 : ((1 << (i)) - 1))
2849 
2850 /**
2851  * Assign locations for either VS inputs or FS outputs.
2852  *
2853  * \param mem_ctx        Temporary ralloc context used for linking.
2854  * \param prog           Shader program whose variables need locations
2855  *                       assigned.
2856  * \param constants      Driver specific constant values for the program.
2857  * \param target_index   Selector for the program target to receive location
2858  *                       assignments.  Must be either \c MESA_SHADER_VERTEX or
2859  *                       \c MESA_SHADER_FRAGMENT.
2860  * \param do_assignment  Whether we are actually marking the assignment or we
2861  *                       are just doing a dry-run check.
2862  *
2863  * \return
2864  * If locations are (or can be, in case of dry-running) successfully assigned,
2865  * true is returned.  Otherwise an error is emitted to the shader link log and
2866  * false is returned.
2867  */
2868 static bool
2869 assign_attribute_or_color_locations(void *mem_ctx,
2870                                     gl_shader_program *prog,
2871                                     const struct gl_constants *constants,
2872                                     unsigned target_index,
2873                                     bool do_assignment)
2874 {
2875    /* Maximum number of generic locations.  This corresponds to either the
2876     * maximum number of draw buffers or the maximum number of generic
2877     * attributes.
2878     */
2879    unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
2880       constants->Program[target_index].MaxAttribs :
2881       MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
2882 
2883    /* Mark invalid locations as being used.
2884     */
2885    unsigned used_locations = ~SAFE_MASK_FROM_INDEX(max_index);
2886    unsigned double_storage_locations = 0;
2887 
2888    assert((target_index == MESA_SHADER_VERTEX)
2889           || (target_index == MESA_SHADER_FRAGMENT));
2890 
2891    gl_linked_shader *const sh = prog->_LinkedShaders[target_index];
2892    if (sh == NULL)
2893       return true;
2894 
2895    /* Operate in a total of four passes.
2896     *
2897     * 1. Invalidate the location assignments for all vertex shader inputs.
2898     *
2899     * 2. Assign locations for inputs that have user-defined (via
2900     *    glBindVertexAttribLocation) locations and outputs that have
2901     *    user-defined locations (via glBindFragDataLocation).
2902     *
2903     * 3. Sort the attributes without assigned locations by number of slots
2904     *    required in decreasing order.  Fragmentation caused by attribute
2905     *    locations assigned by the application may prevent large attributes
2906     *    from having enough contiguous space.
2907     *
2908     * 4. Assign locations to any inputs without assigned locations.
2909     */
2910 
2911    const int generic_base = (target_index == MESA_SHADER_VERTEX)
2912       ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
2913 
2914    const enum ir_variable_mode direction =
2915       (target_index == MESA_SHADER_VERTEX)
2916       ? ir_var_shader_in : ir_var_shader_out;
2917 
2918 
2919    /* Temporary storage for the set of attributes that need locations assigned.
2920     */
2921    struct temp_attr {
2922       unsigned slots;
2923       ir_variable *var;
2924 
2925       /* Used below in the call to qsort. */
2926       static int compare(const void *a, const void *b)
2927       {
2928          const temp_attr *const l = (const temp_attr *) a;
2929          const temp_attr *const r = (const temp_attr *) b;
2930 
2931          /* Reversed because we want a descending order sort below. */
2932          return r->slots - l->slots;
2933       }
2934    } to_assign[32];
2935    assert(max_index <= 32);
2936 
2937    /* Temporary array for the set of attributes that have locations assigned,
2938     * for the purpose of checking overlapping slots/components of (non-ES)
2939     * fragment shader outputs.
2940     */
2941    ir_variable *assigned[12 * 4]; /* (max # of FS outputs) * # components */
2942    unsigned assigned_attr = 0;
2943 
2944    unsigned num_attr = 0;
2945 
2946    foreach_in_list(ir_instruction, node, sh->ir) {
2947       ir_variable *const var = node->as_variable();
2948 
2949       if ((var == NULL) || (var->data.mode != (unsigned) direction))
2950          continue;
2951 
2952       if (var->data.explicit_location) {
2953          var->data.is_unmatched_generic_inout = 0;
2954          if ((var->data.location >= (int)(max_index + generic_base))
2955              || (var->data.location < 0)) {
2956             linker_error(prog,
2957                          "invalid explicit location %d specified for `%s'\n",
2958                          (var->data.location < 0)
2959                          ? var->data.location
2960                          : var->data.location - generic_base,
2961                          var->name);
2962             return false;
2963          }
2964       } else if (target_index == MESA_SHADER_VERTEX) {
2965          unsigned binding;
2966 
2967          if (prog->AttributeBindings->get(binding, var->name)) {
2968             assert(binding >= VERT_ATTRIB_GENERIC0);
2969             var->data.location = binding;
2970             var->data.is_unmatched_generic_inout = 0;
2971          }
2972       } else if (target_index == MESA_SHADER_FRAGMENT) {
2973          unsigned binding;
2974          unsigned index;
2975          const char *name = var->name;
2976          const glsl_type *type = var->type;
2977 
2978          while (type) {
2979             /* Check if there's a binding for the variable name */
2980             if (prog->FragDataBindings->get(binding, name)) {
2981                assert(binding >= FRAG_RESULT_DATA0);
2982                var->data.location = binding;
2983                var->data.is_unmatched_generic_inout = 0;
2984 
2985                if (prog->FragDataIndexBindings->get(index, name)) {
2986                   var->data.index = index;
2987                }
2988                break;
2989             }
2990 
2991             /* If not, but it's an array type, look for name[0] */
2992             if (type->is_array()) {
2993                name = ralloc_asprintf(mem_ctx, "%s[0]", name);
2994                type = type->fields.array;
2995                continue;
2996             }
2997 
2998             break;
2999          }
3000       }
3001 
3002       if (strcmp(var->name, "gl_LastFragData") == 0)
3003          continue;
3004 
3005       /* From GL4.5 core spec, section 15.2 (Shader Execution):
3006        *
3007        *     "Output binding assignments will cause LinkProgram to fail:
3008        *     ...
3009        *     If the program has an active output assigned to a location greater
3010        *     than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
3011        *     an active output assigned an index greater than or equal to one;"
3012        */
3013       if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
3014           var->data.location - generic_base >=
3015           (int) constants->MaxDualSourceDrawBuffers) {
3016          linker_error(prog,
3017                       "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
3018                       "with index %u for %s\n",
3019                       var->data.location - generic_base, var->data.index,
3020                       var->name);
3021          return false;
3022       }
3023 
3024       const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
3025 
3026       /* If the variable is not a built-in and has a location statically
3027        * assigned in the shader (presumably via a layout qualifier), make sure
3028        * that it doesn't collide with other assigned locations.  Otherwise,
3029        * add it to the list of variables that need linker-assigned locations.
3030        */
3031       if (var->data.location != -1) {
3032          if (var->data.location >= generic_base && var->data.index < 1) {
3033             /* From page 61 of the OpenGL 4.0 spec:
3034              *
3035              *     "LinkProgram will fail if the attribute bindings assigned
3036              *     by BindAttribLocation do not leave not enough space to
3037              *     assign a location for an active matrix attribute or an
3038              *     active attribute array, both of which require multiple
3039              *     contiguous generic attributes."
3040              *
3041              * I think the above text prohibits aliasing between explicit and
3042              * automatic assignments.  Aliasing is allowed, however, in manual
3043              * assignments of attribute locations.  See the comments below for
3044              * the details.
3045              *
3046              * From OpenGL 4.0 spec, page 61:
3047              *
3048              *     "It is possible for an application to bind more than one
3049              *     attribute name to the same location. This is referred to as
3050              *     aliasing. This will only work if only one of the aliased
3051              *     attributes is active in the executable program, or if no
3052              *     path through the shader consumes more than one attribute of
3053              *     a set of attributes aliased to the same location. A link
3054              *     error can occur if the linker determines that every path
3055              *     through the shader consumes multiple aliased attributes,
3056              *     but implementations are not required to generate an error
3057              *     in this case."
3058              *
3059              * From GLSL 4.30 spec, page 54:
3060              *
3061              *    "A program will fail to link if any two non-vertex shader
3062              *     input variables are assigned to the same location. For
3063              *     vertex shaders, multiple input variables may be assigned
3064              *     to the same location using either layout qualifiers or via
3065              *     the OpenGL API. However, such aliasing is intended only to
3066              *     support vertex shaders where each execution path accesses
3067              *     at most one input per each location. Implementations are
3068              *     permitted, but not required, to generate link-time errors
3069              *     if they detect that every path through the vertex shader
3070              *     executable accesses multiple inputs assigned to any single
3071              *     location. For all shader types, a program will fail to link
3072              *     if explicit location assignments leave the linker unable
3073              *     to find space for other variables without explicit
3074              *     assignments."
3075              *
3076              * From OpenGL ES 3.0 spec, page 56:
3077              *
3078              *    "Binding more than one attribute name to the same location
3079              *     is referred to as aliasing, and is not permitted in OpenGL
3080              *     ES Shading Language 3.00 vertex shaders. LinkProgram will
3081              *     fail when this condition exists. However, aliasing is
3082              *     possible in OpenGL ES Shading Language 1.00 vertex shaders.
3083              *     This will only work if only one of the aliased attributes
3084              *     is active in the executable program, or if no path through
3085              *     the shader consumes more than one attribute of a set of
3086              *     attributes aliased to the same location. A link error can
3087              *     occur if the linker determines that every path through the
3088              *     shader consumes multiple aliased attributes, but implemen-
3089              *     tations are not required to generate an error in this case."
3090              *
3091              * After looking at above references from OpenGL, OpenGL ES and
3092              * GLSL specifications, we allow aliasing of vertex input variables
3093              * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
3094              *
3095              * NOTE: This is not required by the spec, but it's worth mentioning
3096              * here that we're not doing anything to make sure that no path
3097              * through the vertex shader executable accesses multiple inputs
3098              * assigned to any single location.
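             *
             * A minimal illustration of the vertex-input aliasing discussed
             * above (hypothetical GLSL, not from this file):
             *
             *    layout(location = 1) in vec4 a;
             *    layout(location = 1) in vec4 b;
             *
             * Per the quotes above this is accepted for desktop GL and for
             * OpenGL ES Shading Language 1.00 vertex shaders provided no
             * execution path reads both 'a' and 'b'; the code below only
             * warns and does not attempt to verify that property.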
3099              */
3100 
3101             /* Mask representing the contiguous slots that will be used by
3102              * this attribute.
3103              */
3104             const unsigned attr = var->data.location - generic_base;
3105             const unsigned use_mask = (1 << slots) - 1;
3106             const char *const string = (target_index == MESA_SHADER_VERTEX)
3107                ? "vertex shader input" : "fragment shader output";
3108 
3109             /* Generate a link error if the requested locations for this
3110              * attribute exceed the maximum allowed attribute location.
3111              */
3112             if (attr + slots > max_index) {
3113                linker_error(prog,
3114                            "insufficient contiguous locations "
3115                            "available for %s `%s' %d %d %d\n", string,
3116                            var->name, used_locations, use_mask, attr);
3117                return false;
3118             }
3119 
3120             /* Generate a link error if the set of bits requested for this
3121              * attribute overlaps any previously allocated bits.
3122              */
3123             if ((~(use_mask << attr) & used_locations) != used_locations) {
3124                if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
3125                   /* From section 4.4.2 (Output Layout Qualifiers) of the GLSL
3126                    * 4.40 spec:
3127                    *
3128                    *    "Additionally, for fragment shader outputs, if two
3129                    *    variables are placed within the same location, they
3130                    *    must have the same underlying type (floating-point or
3131                    *    integer). No component aliasing of output variables or
3132                    *    members is allowed.
3133                    */
3134                   for (unsigned i = 0; i < assigned_attr; i++) {
3135                      unsigned assigned_slots =
3136                         assigned[i]->type->count_attribute_slots(false);
3137                      unsigned assig_attr =
3138                         assigned[i]->data.location - generic_base;
3139                      unsigned assigned_use_mask = (1 << assigned_slots) - 1;
3140 
3141                      if ((assigned_use_mask << assig_attr) &
3142                          (use_mask << attr)) {
3143 
3144                         const glsl_type *assigned_type =
3145                            assigned[i]->type->without_array();
3146                         const glsl_type *type = var->type->without_array();
3147                         if (assigned_type->base_type != type->base_type) {
3148                            linker_error(prog, "types do not match for aliased"
3149                                         " %ss %s and %s\n", string,
3150                                         assigned[i]->name, var->name);
3151                            return false;
3152                         }
3153 
3154                         unsigned assigned_component_mask =
3155                            ((1 << assigned_type->vector_elements) - 1) <<
3156                            assigned[i]->data.location_frac;
3157                         unsigned component_mask =
3158                            ((1 << type->vector_elements) - 1) <<
3159                            var->data.location_frac;
3160                         if (assigned_component_mask & component_mask) {
3161                            linker_error(prog, "overlapping component is "
3162                                         "assigned to %ss %s and %s "
3163                                         "(component=%d)\n",
3164                                         string, assigned[i]->name, var->name,
3165                                         var->data.location_frac);
3166                            return false;
3167                         }
3168                      }
3169                   }
3170                } else if (target_index == MESA_SHADER_FRAGMENT ||
3171                           (prog->IsES && prog->data->Version >= 300)) {
3172                   linker_error(prog, "overlapping location is assigned "
3173                                "to %s `%s' %d %d %d\n", string, var->name,
3174                                used_locations, use_mask, attr);
3175                   return false;
3176                } else {
3177                   linker_warning(prog, "overlapping location is assigned "
3178                                  "to %s `%s' %d %d %d\n", string, var->name,
3179                                  used_locations, use_mask, attr);
3180                }
3181             }
3182 
3183             if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
3184                /* Only track assigned variables for non-ES fragment shaders
3185                 * to avoid overflowing the array.
3186                 *
3187                 * At most one variable per fragment output component should
3188                 * reach this.
3189                 */
3190                assert(assigned_attr < ARRAY_SIZE(assigned));
3191                assigned[assigned_attr] = var;
3192                assigned_attr++;
3193             }
3194 
3195             used_locations |= (use_mask << attr);
3196 
3197             /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
3198              *
3199              * "A program with more than the value of MAX_VERTEX_ATTRIBS
3200              *  active attribute variables may fail to link, unless
3201              *  device-dependent optimizations are able to make the program
3202              *  fit within available hardware resources. For the purposes
3203              *  of this test, attribute variables of the type dvec3, dvec4,
3204              *  dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
3205              *  count as consuming twice as many attributes as equivalent
3206              *  single-precision types. While these types use the same number
3207              *  of generic attributes as their single-precision equivalents,
3208              *  implementations are permitted to consume two single-precision
3209              *  vectors of internal storage for each three- or four-component
3210              *  double-precision vector."
3211              *
3212              * Mark this attribute slot as taking up twice as much space
3213              * so we can count it properly against limits.  According to
3214              * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
3215              * is optional behavior, but it seems preferable.
3216              */
3217             if (var->type->without_array()->is_dual_slot())
3218                double_storage_locations |= (use_mask << attr);
3219          }
3220 
3221          continue;
3222       }
3223 
3224       if (num_attr >= max_index) {
3225          linker_error(prog, "too many %s (max %u)",
3226                       target_index == MESA_SHADER_VERTEX ?
3227                       "vertex shader inputs" : "fragment shader outputs",
3228                       max_index);
3229          return false;
3230       }
3231       to_assign[num_attr].slots = slots;
3232       to_assign[num_attr].var = var;
3233       num_attr++;
3234    }
3235 
3236    if (!do_assignment)
3237       return true;
3238 
3239    if (target_index == MESA_SHADER_VERTEX) {
3240       unsigned total_attribs_size =
3241          util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3242          util_bitcount(double_storage_locations);
3243       if (total_attribs_size > max_index) {
3244          linker_error(prog,
3245                       "attempt to use %d vertex attribute slots, only %d available\n",
3246                       total_attribs_size, max_index);
3247          return false;
3248       }
3249    }
3250 
3251    /* If all of the attributes were assigned locations by the application (or
3252     * are built-in attributes with fixed locations), return early.  This should
3253     * be the common case.
3254     */
3255    if (num_attr == 0)
3256       return true;
3257 
3258    qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
3259 
3260    if (target_index == MESA_SHADER_VERTEX) {
3261       /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can
3262        * only be explicitly assigned via glBindAttribLocation.  Mark it as
3263        * reserved to prevent it from being automatically allocated below.
3264        */
3265       find_deref_visitor find("gl_Vertex");
3266       find.run(sh->ir);
3267       if (find.variable_found())
3268          used_locations |= (1 << 0);
3269    }
3270 
3271    for (unsigned i = 0; i < num_attr; i++) {
3272       /* Mask representing the contiguous slots that will be used by this
3273        * attribute.
3274        */
3275       const unsigned use_mask = (1 << to_assign[i].slots) - 1;
3276 
3277       int location = find_available_slots(used_locations, to_assign[i].slots);
3278 
3279       if (location < 0) {
3280          const char *const string = (target_index == MESA_SHADER_VERTEX)
3281             ? "vertex shader input" : "fragment shader output";
3282 
3283          linker_error(prog,
3284                       "insufficient contiguous locations "
3285                       "available for %s `%s'\n",
3286                       string, to_assign[i].var->name);
3287          return false;
3288       }
3289 
3290       to_assign[i].var->data.location = generic_base + location;
3291       to_assign[i].var->data.is_unmatched_generic_inout = 0;
3292       used_locations |= (use_mask << location);
3293 
3294       if (to_assign[i].var->type->without_array()->is_dual_slot())
3295          double_storage_locations |= (use_mask << location);
3296    }
3297 
3298    /* Now that we have all the locations, from the GL 4.5 core spec, section
3299     * 11.1.1 (Vertex Attributes), dvec3, dvec4, dmat2x3, dmat2x4, dmat3,
3300     * dmat3x4, dmat4x3, and dmat4 count as consuming twice as many attributes
3301     * as equivalent single-precision types.
3302     */
3303    if (target_index == MESA_SHADER_VERTEX) {
3304       unsigned total_attribs_size =
3305          util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3306          util_bitcount(double_storage_locations);
3307       if (total_attribs_size > max_index) {
3308          linker_error(prog,
3309                       "attempt to use %d vertex attribute slots, only %d available\n",
3310                       total_attribs_size, max_index);
3311          return false;
3312       }
3313    }
3314 
3315    return true;
3316 }
3317 
3318 /**
3319  * Match explicit locations of outputs to inputs and clear the unmatched
3320  * flag when a match is found so the variables are not optimized away.
3321  */
3322 static void
3323 match_explicit_outputs_to_inputs(gl_linked_shader *producer,
3324                                  gl_linked_shader *consumer)
3325 {
3326    glsl_symbol_table parameters;
3327    ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
3328       { {NULL, NULL} };
3329 
3330    /* Find all shader outputs in the "producer" stage.
3331     */
3332    foreach_in_list(ir_instruction, node, producer->ir) {
3333       ir_variable *const var = node->as_variable();
3334 
3335       if ((var == NULL) || (var->data.mode != ir_var_shader_out))
3336          continue;
3337 
3338       if (var->data.explicit_location &&
3339           var->data.location >= VARYING_SLOT_VAR0) {
3340          const unsigned idx = var->data.location - VARYING_SLOT_VAR0;
3341          if (explicit_locations[idx][var->data.location_frac] == NULL)
3342             explicit_locations[idx][var->data.location_frac] = var;
3343 
3344          /* Always match TCS outputs. They are shared by all invocations
3345           * within a patch and can be used as shared memory.
3346           */
3347          if (producer->Stage == MESA_SHADER_TESS_CTRL)
3348             var->data.is_unmatched_generic_inout = 0;
3349       }
3350    }
3351 
3352    /* Match inputs to outputs */
3353    foreach_in_list(ir_instruction, node, consumer->ir) {
3354       ir_variable *const input = node->as_variable();
3355 
3356       if ((input == NULL) || (input->data.mode != ir_var_shader_in))
3357          continue;
3358 
3359       ir_variable *output = NULL;
3360       if (input->data.explicit_location
3361           && input->data.location >= VARYING_SLOT_VAR0) {
3362          output = explicit_locations[input->data.location - VARYING_SLOT_VAR0]
3363             [input->data.location_frac];
3364 
3365          if (output != NULL){
3366             input->data.is_unmatched_generic_inout = 0;
3367             output->data.is_unmatched_generic_inout = 0;
3368          }
3369       }
3370    }
3371 }
3372 
3373 /**
3374  * Store the gl_FragDepth layout in the gl_shader_program struct.
3375  */
3376 static void
3377 store_fragdepth_layout(struct gl_shader_program *prog)
3378 {
3379    if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
3380       return;
3381    }
3382 
3383    struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
3384 
3385    /* We don't look up the gl_FragDepth symbol directly because if
3386     * gl_FragDepth is not used in the shader, it's removed from the IR.
3387     * However, the symbol won't be removed from the symbol table.
3388     *
3389     * We're only interested in the cases where the variable is NOT removed
3390     * from the IR.
3391     */
3392    foreach_in_list(ir_instruction, node, ir) {
3393       ir_variable *const var = node->as_variable();
3394 
3395       if (var == NULL || var->data.mode != ir_var_shader_out) {
3396          continue;
3397       }
3398 
3399       if (strcmp(var->name, "gl_FragDepth") == 0) {
3400          switch (var->data.depth_layout) {
3401          case ir_depth_layout_none:
3402             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
3403             return;
3404          case ir_depth_layout_any:
3405             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
3406             return;
3407          case ir_depth_layout_greater:
3408             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
3409             return;
3410          case ir_depth_layout_less:
3411             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
3412             return;
3413          case ir_depth_layout_unchanged:
3414             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3415             return;
3416          default:
3417             assert(0);
3418             return;
3419          }
3420       }
3421    }
3422 }
3423 
3424 /**
3425  * Validate shader image resources.
3426  */
3427 static void
3428 check_image_resources(const struct gl_constants *consts,
3429                       const struct gl_extensions *exts,
3430                       struct gl_shader_program *prog)
3431 {
3432    unsigned total_image_units = 0;
3433    unsigned fragment_outputs = 0;
3434    unsigned total_shader_storage_blocks = 0;
3435 
3436    if (!consts->MaxCombinedImageUniforms &&
3437        !consts->MaxCombinedShaderStorageBlocks)
3438       return;
3439 
3440    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3441       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
3442 
3443       if (sh) {
3444          total_image_units += sh->Program->info.num_images;
3445          total_shader_storage_blocks += sh->Program->info.num_ssbos;
3446 
3447          if (i == MESA_SHADER_FRAGMENT) {
3448             foreach_in_list(ir_instruction, node, sh->ir) {
3449                ir_variable *var = node->as_variable();
3450                if (var && var->data.mode == ir_var_shader_out)
3451                /* Since there are no double FS outputs, pass false. */
3452                   fragment_outputs += var->type->count_attribute_slots(false);
3453             }
3454          }
3455       }
3456    }
3457 
3458    if (total_image_units > consts->MaxCombinedImageUniforms)
3459       linker_error(prog, "Too many combined image uniforms\n");
3460 
3461    if (total_image_units + fragment_outputs + total_shader_storage_blocks >
3462        consts->MaxCombinedShaderOutputResources)
3463       linker_error(prog, "Too many combined image uniforms, shader storage "
3464                          "buffers and fragment outputs\n");
3465 }
3466 
3467 
3468 /**
3469  * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3470  * for a variable and checks for overlaps with other uniforms that use
3471  * explicit locations.
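 *
 * For example (hypothetical declaration): "layout(location = 2) uniform
 * vec4 v[3];" reserves locations 2, 3 and 4 in UniformRemapTable, so any
 * other default-block uniform explicitly placed in that range triggers a
 * link error.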
3472  */
3473 static int
3474 reserve_explicit_locations(struct gl_shader_program *prog,
3475                            string_to_uint_map *map, ir_variable *var)
3476 {
3477    unsigned slots = var->type->uniform_locations();
3478    unsigned max_loc = var->data.location + slots - 1;
3479    unsigned return_value = slots;
3480 
3481    /* Resize remap table if locations do not fit in the current one. */
3482    if (max_loc + 1 > prog->NumUniformRemapTable) {
3483       prog->UniformRemapTable =
3484          reralloc(prog, prog->UniformRemapTable,
3485                   gl_uniform_storage *,
3486                   max_loc + 1);
3487 
3488       if (!prog->UniformRemapTable) {
3489          linker_error(prog, "Out of memory during linking.\n");
3490          return -1;
3491       }
3492 
3493       /* Initialize allocated space. */
3494       for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
3495          prog->UniformRemapTable[i] = NULL;
3496 
3497       prog->NumUniformRemapTable = max_loc + 1;
3498    }
3499 
3500    for (unsigned i = 0; i < slots; i++) {
3501       unsigned loc = var->data.location + i;
3502 
3503       /* Check if location is already used. */
3504       if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3505 
3506          /* Possibly the same uniform from a different stage; this is OK. */
3507          unsigned hash_loc;
3508          if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
3509             return_value = 0;
3510             continue;
3511          }
3512 
3513          /* ARB_explicit_uniform_location specification states:
3514           *
3515           *     "No two default-block uniform variables in the program can have
3516           *     the same location, even if they are unused, otherwise a compiler
3517           *     or linker error will be generated."
3518           */
3519          linker_error(prog,
3520                       "location qualifier for uniform %s overlaps "
3521                       "previously used location\n",
3522                       var->name);
3523          return -1;
3524       }
3525 
3526       /* Initialize location as inactive before optimization
3527        * rounds and location assignment.
3528        */
3529       prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3530    }
3531 
3532    /* Note, base location used for arrays. */
3533    map->put(var->data.location, var->name);
3534 
3535    return return_value;
3536 }
3537 
3538 static bool
3539 reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
3540                                       struct gl_program *p,
3541                                       ir_variable *var)
3542 {
3543    unsigned slots = var->type->uniform_locations();
3544    unsigned max_loc = var->data.location + slots - 1;
3545 
3546    /* Resize remap table if locations do not fit in the current one. */
3547    if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
3548       p->sh.SubroutineUniformRemapTable =
3549          reralloc(p, p->sh.SubroutineUniformRemapTable,
3550                   gl_uniform_storage *,
3551                   max_loc + 1);
3552 
3553       if (!p->sh.SubroutineUniformRemapTable) {
3554          linker_error(prog, "Out of memory during linking.\n");
3555          return false;
3556       }
3557 
3558       /* Initialize allocated space. */
3559       for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
3560          p->sh.SubroutineUniformRemapTable[i] = NULL;
3561 
3562       p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
3563    }
3564 
3565    for (unsigned i = 0; i < slots; i++) {
3566       unsigned loc = var->data.location + i;
3567 
3568       /* Check if location is already used. */
3569       if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3570 
3571          /* ARB_explicit_uniform_location specification states:
3572           *     "No two subroutine uniform variables can have the same location
3573           *     in the same shader stage, otherwise a compiler or linker error
3574           *     will be generated."
3575           */
3576          linker_error(prog,
3577                       "location qualifier for uniform %s overlaps "
3578                       "previously used location\n",
3579                       var->name);
3580          return false;
3581       }
3582 
3583       /* Initialize location as inactive before optimization
3584        * rounds and location assignment.
3585        */
3586       p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3587    }
3588 
3589    return true;
3590 }
3591 /**
3592  * Check and reserve all explicit uniform locations.  This is called before
3593  * any optimizations happen so that inactive uniforms and inactive array
3594  * elements that may later get trimmed away are still handled.
3595  */
3596 static void
3597 check_explicit_uniform_locations(const struct gl_extensions *exts,
3598                                  struct gl_shader_program *prog)
3599 {
3600    prog->NumExplicitUniformLocations = 0;
3601 
3602    if (!exts->ARB_explicit_uniform_location)
3603       return;
3604 
3605    /* This map is used to detect if overlapping explicit locations
3606     * occur with the same uniform (from a different stage) or a different one.
3607     */
3608    string_to_uint_map *uniform_map = new string_to_uint_map;
3609 
3610    if (!uniform_map) {
3611       linker_error(prog, "Out of memory during linking.\n");
3612       return;
3613    }
3614 
3615    unsigned entries_total = 0;
3616    unsigned mask = prog->data->linked_stages;
3617    while (mask) {
3618       const int i = u_bit_scan(&mask);
3619       struct gl_program *p = prog->_LinkedShaders[i]->Program;
3620 
3621       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
3622          ir_variable *var = node->as_variable();
3623          if (!var || var->data.mode != ir_var_uniform)
3624             continue;
3625 
3626          if (var->data.explicit_location) {
3627             bool ret = false;
3628             if (var->type->without_array()->is_subroutine())
3629                ret = reserve_subroutine_explicit_locations(prog, p, var);
3630             else {
3631                int slots = reserve_explicit_locations(prog, uniform_map,
3632                                                       var);
3633                if (slots != -1) {
3634                   ret = true;
3635                   entries_total += slots;
3636                }
3637             }
3638             if (!ret) {
3639                delete uniform_map;
3640                return;
3641             }
3642          }
3643       }
3644    }
3645 
3646    link_util_update_empty_uniform_locations(prog);
3647 
3648    delete uniform_map;
3649    prog->NumExplicitUniformLocations = entries_total;
3650 }
3651 
3652 /* Check whether variable 'var' is a packed varying and whether the given
3653  * name is part of the packed varying's list.
3654  *
3655  * If a variable is a packed varying, it has a name like
3656  * 'packed:a,b,c' where a, b and c are separate variables.
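 *
 * For example, a lowered variable named "packed:color,texcoord" makes this
 * function return true for the names "color" and "texcoord" and false for
 * anything else (the names are purely illustrative).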
3657  */
3658 static bool
3659 included_in_packed_varying(ir_variable *var, const char *name)
3660 {
3661    if (strncmp(var->name, "packed:", 7) != 0)
3662       return false;
3663 
3664    char *list = strdup(var->name + 7);
3665    assert(list);
3666 
3667    bool found = false;
3668    char *saveptr;
3669    char *token = strtok_r(list, ",", &saveptr);
3670    while (token) {
3671       if (strcmp(token, name) == 0) {
3672          found = true;
3673          break;
3674       }
3675       token = strtok_r(NULL, ",", &saveptr);
3676    }
3677    free(list);
3678    return found;
3679 }
3680 
3681 /**
3682  * Build a stage reference bitmask from a variable name.
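 *
 * For instance, a varying referenced by both the vertex and fragment stages
 * would yield (1 << MESA_SHADER_VERTEX) | (1 << MESA_SHADER_FRAGMENT).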
3683  */
3684 static uint8_t
3685 build_stageref(struct gl_shader_program *shProg, const char *name,
3686                unsigned mode)
3687 {
3688    uint8_t stages = 0;
3689 
3690    /* Note that we assume at most 8 stages; if more stages are added, the type
3691     * used for the reference mask in gl_program_resource will need to change.
3692     */
3693    assert(MESA_SHADER_STAGES < 8);
3694 
3695    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3696       struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
3697       if (!sh)
3698          continue;
3699 
3700       /* Shader symbol table may contain variables that have
3701        * been optimized away. Search IR for the variable instead.
3702        */
3703       foreach_in_list(ir_instruction, node, sh->ir) {
3704          ir_variable *var = node->as_variable();
3705          if (var) {
3706             unsigned baselen = strlen(var->name);
3707 
3708             if (included_in_packed_varying(var, name)) {
3709                   stages |= (1 << i);
3710                   break;
3711             }
3712 
3713             /* Type needs to match if specified, otherwise we might
3714              * pick a variable with the same name but a different interface.
3715              */
3716             if (var->data.mode != mode)
3717                continue;
3718 
3719             if (strncmp(var->name, name, baselen) == 0) {
3720                /* Check for exact name matches but also check for arrays and
3721                 * structs.
3722                 */
3723                if (name[baselen] == '\0' ||
3724                    name[baselen] == '[' ||
3725                    name[baselen] == '.') {
3726                   stages |= (1 << i);
3727                   break;
3728                }
3729             }
3730          }
3731       }
3732    }
3733    return stages;
3734 }
3735 
3736 /**
3737  * Create gl_shader_variable from ir_variable class.
3738  */
3739 static gl_shader_variable *
3740 create_shader_variable(struct gl_shader_program *shProg,
3741                        const ir_variable *in,
3742                        const char *name, const glsl_type *type,
3743                        const glsl_type *interface_type,
3744                        bool use_implicit_location, int location,
3745                        const glsl_type *outermost_struct_type)
3746 {
3747    /* Allocate zero-initialized memory to ensure that bitfield padding
3748     * is zero.
3749     */
3750    gl_shader_variable *out = rzalloc(shProg, struct gl_shader_variable);
3751    if (!out)
3752       return NULL;
3753 
3754    /* gl_VertexID may be lowered to gl_VertexIDMESA, but applications expect
3755     * to see gl_VertexID in the program resource list, so pretend it is.
3756     */
3757    if (in->data.mode == ir_var_system_value &&
3758        in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
3759       out->name.string = ralloc_strdup(shProg, "gl_VertexID");
3760    } else if ((in->data.mode == ir_var_shader_out &&
3761                in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
3762               (in->data.mode == ir_var_system_value &&
3763                in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
3764       out->name.string = ralloc_strdup(shProg, "gl_TessLevelOuter");
3765       type = glsl_type::get_array_instance(glsl_type::float_type, 4);
3766    } else if ((in->data.mode == ir_var_shader_out &&
3767                in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
3768               (in->data.mode == ir_var_system_value &&
3769                in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
3770       out->name.string = ralloc_strdup(shProg, "gl_TessLevelInner");
3771       type = glsl_type::get_array_instance(glsl_type::float_type, 2);
3772    } else {
3773       out->name.string = ralloc_strdup(shProg, name);
3774    }
3775 
3776    resource_name_updated(&out->name);
3777 
3778    if (!out->name.string)
3779       return NULL;
3780 
3781    /* The ARB_program_interface_query spec says:
3782     *
3783     *     "Not all active variables are assigned valid locations; the
3784     *     following variables will have an effective location of -1:
3785     *
3786     *      * uniforms declared as atomic counters;
3787     *
3788     *      * members of a uniform block;
3789     *
3790     *      * built-in inputs, outputs, and uniforms (starting with "gl_"); and
3791     *
3792     *      * inputs or outputs not declared with a "location" layout
3793     *        qualifier, except for vertex shader inputs and fragment shader
3794     *        outputs."
3795     */
3796    if (in->type->is_atomic_uint() || is_gl_identifier(in->name) ||
3797        !(in->data.explicit_location || use_implicit_location)) {
3798       out->location = -1;
3799    } else {
3800       out->location = location;
3801    }
3802 
3803    out->type = type;
3804    out->outermost_struct_type = outermost_struct_type;
3805    out->interface_type = interface_type;
3806    out->component = in->data.location_frac;
3807    out->index = in->data.index;
3808    out->patch = in->data.patch;
3809    out->mode = in->data.mode;
3810    out->interpolation = in->data.interpolation;
3811    out->explicit_location = in->data.explicit_location;
3812    out->precision = in->data.precision;
3813 
3814    return out;
3815 }
3816 
3817 static bool
3818 add_shader_variable(struct gl_shader_program *shProg,
3819                     struct set *resource_set,
3820                     unsigned stage_mask,
3821                     GLenum programInterface, ir_variable *var,
3822                     const char *name, const glsl_type *type,
3823                     bool use_implicit_location, int location,
3824                     bool inouts_share_location,
3825                     const glsl_type *outermost_struct_type = NULL)
3826 {
3827    const glsl_type *interface_type = var->get_interface_type();
3828 
3829    if (outermost_struct_type == NULL) {
3830       if (var->data.from_named_ifc_block) {
3831          const char *interface_name = interface_type->name;
3832 
3833          if (interface_type->is_array()) {
3834             /* Issue #16 of the ARB_program_interface_query spec says:
3835              *
3836              * "* If a variable is a member of an interface block without an
3837              *    instance name, it is enumerated using just the variable name.
3838              *
3839              *  * If a variable is a member of an interface block with an
3840              *    instance name, it is enumerated as "BlockName.Member", where
3841              *    "BlockName" is the name of the interface block (not the
3842              *    instance name) and "Member" is the name of the variable."
3843              *
3844              * In particular, it indicates that it should be "BlockName",
3845              * not "BlockName[array length]".  The conformance suite and
3846              * dEQP both require this behavior.
3847              *
3848              * Here, we unwrap the extra array level added by named interface
3849              * block array lowering so we have the correct variable type.  We
3850              * also unwrap the interface type when constructing the name.
3851              *
3852              * We leave interface_type the same so that ES 3.x SSO pipeline
3853              * validation can enforce the rules requiring array length to
3854              * match on interface blocks.
3855              */
3856             type = type->fields.array;
3857 
3858             interface_name = interface_type->fields.array->name;
3859          }
3860 
3861          name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
3862       }
3863    }
3864 
3865    switch (type->base_type) {
3866    case GLSL_TYPE_STRUCT: {
3867       /* The ARB_program_interface_query spec says:
3868        *
3869        *     "For an active variable declared as a structure, a separate entry
3870        *     will be generated for each active structure member.  The name of
3871        *     each entry is formed by concatenating the name of the structure,
3872        *     the "."  character, and the name of the structure member.  If a
3873        *     structure member to enumerate is itself a structure or array,
3874        *     these enumeration rules are applied recursively."
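       *
       * Illustrative example (hypothetical GLSL): for
       * "struct S { float a; vec2 b; }; uniform S s;" the recursion below
       * generates the entries "s.a" and "s.b".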
3875        */
3876       if (outermost_struct_type == NULL)
3877          outermost_struct_type = type;
3878 
3879       unsigned field_location = location;
3880       for (unsigned i = 0; i < type->length; i++) {
3881          const struct glsl_struct_field *field = &type->fields.structure[i];
3882          char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
3883          if (!add_shader_variable(shProg, resource_set,
3884                                   stage_mask, programInterface,
3885                                   var, field_name, field->type,
3886                                   use_implicit_location, field_location,
3887                                   false, outermost_struct_type))
3888             return false;
3889 
3890          field_location += field->type->count_attribute_slots(false);
3891       }
3892       return true;
3893    }
3894 
3895    case GLSL_TYPE_ARRAY: {
3896       /* The ARB_program_interface_query spec says:
3897        *
3898        *     "For an active variable declared as an array of basic types, a
3899        *      single entry will be generated, with its name string formed by
3900        *      concatenating the name of the array and the string "[0]"."
3901        *
3902        *     "For an active variable declared as an array of an aggregate data
3903        *      type (structures or arrays), a separate entry will be generated
3904        *      for each active array element, unless noted immediately below.
3905        *      The name of each entry is formed by concatenating the name of
3906        *      the array, the "[" character, an integer identifying the element
3907        *      number, and the "]" character.  These enumeration rules are
3908        *      applied recursively, treating each enumerated array element as a
3909        *      separate active variable."
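       *
       * Illustrative example (hypothetical GLSL): "uniform float f[4];"
       * yields the single entry "f[0]", while an array of aggregates such as
       * "uniform S sa[2];" yields one entry per element: "sa[0]" and "sa[1]".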
3910        */
3911       const struct glsl_type *array_type = type->fields.array;
3912       if (array_type->base_type == GLSL_TYPE_STRUCT ||
3913           array_type->base_type == GLSL_TYPE_ARRAY) {
3914          unsigned elem_location = location;
3915          unsigned stride = inouts_share_location ? 0 :
3916                            array_type->count_attribute_slots(false);
3917          for (unsigned i = 0; i < type->length; i++) {
3918             char *elem = ralloc_asprintf(shProg, "%s[%d]", name, i);
3919             if (!add_shader_variable(shProg, resource_set,
3920                                      stage_mask, programInterface,
3921                                      var, elem, array_type,
3922                                      use_implicit_location, elem_location,
3923                                      false, outermost_struct_type))
3924                return false;
3925             elem_location += stride;
3926          }
3927          return true;
3928       }
3929       FALLTHROUGH;
3930    }
3931 
3932    default: {
3933       /* The ARB_program_interface_query spec says:
3934        *
3935        *     "For an active variable declared as a single instance of a basic
3936        *     type, a single entry will be generated, using the variable name
3937        *     from the shader source."
3938        */
3939       gl_shader_variable *sha_v =
3940          create_shader_variable(shProg, var, name, type, interface_type,
3941                                 use_implicit_location, location,
3942                                 outermost_struct_type);
3943       if (!sha_v)
3944          return false;
3945 
3946       return link_util_add_program_resource(shProg, resource_set,
3947                                             programInterface, sha_v, stage_mask);
3948    }
3949    }
3950 }
3951 
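/**
 * Per-vertex (non-patch) tessellation and geometry stage inputs, and
 * tessellation control outputs, are implicitly arrayed per vertex; every
 * element of that outer array refers to the same location.  Callers use
 * this to avoid advancing the location while enumerating such elements.
 */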
3952 static bool
3953 inout_has_same_location(const ir_variable *var, unsigned stage)
3954 {
3955    if (!var->data.patch &&
3956        ((var->data.mode == ir_var_shader_out &&
3957          stage == MESA_SHADER_TESS_CTRL) ||
3958         (var->data.mode == ir_var_shader_in &&
3959          (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
3960           stage == MESA_SHADER_GEOMETRY))))
3961       return true;
3962    else
3963       return false;
3964 }
3965 
3966 static bool
3967 add_interface_variables(struct gl_shader_program *shProg,
3968                         struct set *resource_set,
3969                         unsigned stage, GLenum programInterface)
3970 {
3971    exec_list *ir = shProg->_LinkedShaders[stage]->ir;
3972 
3973    foreach_in_list(ir_instruction, node, ir) {
3974       ir_variable *var = node->as_variable();
3975 
3976       if (!var || var->data.how_declared == ir_var_hidden)
3977          continue;
3978 
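      /* var->data.location holds driver-internal slot values
       * (VERT_ATTRIB_*, VARYING_SLOT_* or FRAG_RESULT_*); the program
       * interface expects zero-based locations, so work out the bias to
       * subtract for this stage and interface.
       */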
3979       int loc_bias;
3980 
3981       switch (var->data.mode) {
3982       case ir_var_system_value:
3983       case ir_var_shader_in:
3984          if (programInterface != GL_PROGRAM_INPUT)
3985             continue;
3986          loc_bias = (stage == MESA_SHADER_VERTEX) ? int(VERT_ATTRIB_GENERIC0)
3987                                                   : int(VARYING_SLOT_VAR0);
3988          break;
3989       case ir_var_shader_out:
3990          if (programInterface != GL_PROGRAM_OUTPUT)
3991             continue;
3992          loc_bias = (stage == MESA_SHADER_FRAGMENT) ? int(FRAG_RESULT_DATA0)
3993                                                     : int(VARYING_SLOT_VAR0);
3994          break;
3995       default:
3996          continue;
3997       }
3998 
3999       if (var->data.patch)
4000          loc_bias = int(VARYING_SLOT_PATCH0);
4001 
4002       /* Skip packed varyings; they are handled separately by
4003        * add_packed_varyings().
4004        */
4005       if (strncmp(var->name, "packed:", 7) == 0)
4006          continue;
4007 
4008       /* Skip fragdata arrays; they are handled separately by
4009        * add_fragdata_arrays().
4010        */
4011       if (strncmp(var->name, "gl_out_FragData", 15) == 0)
4012          continue;
4013 
4014       const bool vs_input_or_fs_output =
4015          (stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
4016          (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
4017 
4018       if (!add_shader_variable(shProg, resource_set,
4019                                1 << stage, programInterface,
4020                                var, var->name, var->type, vs_input_or_fs_output,
4021                                var->data.location - loc_bias,
4022                                inout_has_same_location(var, stage)))
4023          return false;
4024    }
4025    return true;
4026 }
4027 
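/**
 * Add the lowered "packed:" varyings of the given stage to the resource
 * list.  Used for separable programs, where a stage's inputs or outputs
 * are part of the externally visible program interface even without an
 * adjacent stage in the same program.
 */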
4028 static bool
4029 add_packed_varyings(struct gl_shader_program *shProg,
4030                     struct set *resource_set,
4031                     int stage, GLenum type)
4032 {
4033    struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];
4034    GLenum iface;
4035 
4036    if (!sh || !sh->packed_varyings)
4037       return true;
4038 
4039    foreach_in_list(ir_instruction, node, sh->packed_varyings) {
4040       ir_variable *var = node->as_variable();
4041       if (var) {
4042          switch (var->data.mode) {
4043          case ir_var_shader_in:
4044             iface = GL_PROGRAM_INPUT;
4045             break;
4046          case ir_var_shader_out:
4047             iface = GL_PROGRAM_OUTPUT;
4048             break;
4049          default:
4050             unreachable("unexpected type");
4051          }
4052 
4053          if (type == iface) {
4054             const int stage_mask =
4055                build_stageref(shProg, var->name, var->data.mode);
4056             if (!add_shader_variable(shProg, resource_set,
4057                                      stage_mask,
4058                                      iface, var, var->name, var->type, false,
4059                                      var->data.location - VARYING_SLOT_VAR0,
4060                                      inout_has_same_location(var, stage)))
4061                return false;
4062          }
4063       }
4064    }
4065    return true;
4066 }
4067 
4068 static bool
4069 add_fragdata_arrays(struct gl_shader_program *shProg,
4070                     struct set *resource_set)
4071 {
4072    struct gl_linked_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
4073 
4074    if (!sh || !sh->fragdata_arrays)
4075       return true;
4076 
4077    foreach_in_list(ir_instruction, node, sh->fragdata_arrays) {
4078       ir_variable *var = node->as_variable();
4079       if (var) {
4080          assert(var->data.mode == ir_var_shader_out);
4081 
4082          if (!add_shader_variable(shProg, resource_set,
4083                                   1 << MESA_SHADER_FRAGMENT,
4084                                   GL_PROGRAM_OUTPUT, var, var->name, var->type,
4085                                   true, var->data.location - FRAG_RESULT_DATA0,
4086                                   false))
4087             return false;
4088       }
4089    }
4090    return true;
4091 }
4092 
4093 /**
4094  * Builds up a list of program resources that point to existing
4095  * resource data.
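 *
 * Resources are added roughly in this order: packed varyings (separable
 * programs only), fragdata arrays, shader inputs and outputs, transform
 * feedback varyings and buffers, uniforms and buffer variables, uniform
 * and shader storage blocks, atomic counter buffers, and subroutine
 * uniforms and functions.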
4096  */
4097 void
4098 build_program_resource_list(const struct gl_constants *consts,
4099                             struct gl_shader_program *shProg,
4100                             bool add_packed_varyings_only)
4101 {
4102    /* Rebuild resource list. */
4103    if (shProg->data->ProgramResourceList) {
4104       ralloc_free(shProg->data->ProgramResourceList);
4105       shProg->data->ProgramResourceList = NULL;
4106       shProg->data->NumProgramResourceList = 0;
4107    }
4108 
4109    int input_stage = MESA_SHADER_STAGES, output_stage = 0;
4110 
4111    /* Determine first input and final output stage. These are used to
4112     * detect which variables should be enumerated in the resource list
4113     * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
4114     */
4115    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4116       if (!shProg->_LinkedShaders[i])
4117          continue;
4118       if (input_stage == MESA_SHADER_STAGES)
4119          input_stage = i;
4120       output_stage = i;
4121    }
4122 
4123    /* Empty shader, no resources. */
4124    if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
4125       return;
4126 
4127    struct set *resource_set = _mesa_pointer_set_create(NULL);
4128 
4129    /* Program interface needs to expose varyings in case of SSO. */
4130    if (shProg->SeparateShader) {
4131       if (!add_packed_varyings(shProg, resource_set,
4132                                input_stage, GL_PROGRAM_INPUT))
4133          return;
4134 
4135       if (!add_packed_varyings(shProg, resource_set,
4136                                output_stage, GL_PROGRAM_OUTPUT))
4137          return;
4138    }
4139 
4140    if (add_packed_varyings_only) {
4141       _mesa_set_destroy(resource_set, NULL);
4142       return;
4143    }
4144 
4145    if (!add_fragdata_arrays(shProg, resource_set))
4146       return;
4147 
4148    /* Add inputs and outputs to the resource list. */
4149    if (!add_interface_variables(shProg, resource_set,
4150                                 input_stage, GL_PROGRAM_INPUT))
4151       return;
4152 
4153    if (!add_interface_variables(shProg, resource_set,
4154                                 output_stage, GL_PROGRAM_OUTPUT))
4155       return;
4156 
4157    if (shProg->last_vert_prog) {
4158       struct gl_transform_feedback_info *linked_xfb =
4159          shProg->last_vert_prog->sh.LinkedTransformFeedback;
4160 
4161       /* Add transform feedback varyings. */
4162       if (linked_xfb->NumVarying > 0) {
4163          for (int i = 0; i < linked_xfb->NumVarying; i++) {
4164             if (!link_util_add_program_resource(shProg, resource_set,
4165                                                 GL_TRANSFORM_FEEDBACK_VARYING,
4166                                                 &linked_xfb->Varyings[i], 0))
4167                return;
4168          }
4169       }
4170 
4171       /* Add transform feedback buffers. */
4172       for (unsigned i = 0; i < consts->MaxTransformFeedbackBuffers; i++) {
4173          if ((linked_xfb->ActiveBuffers >> i) & 1) {
4174             linked_xfb->Buffers[i].Binding = i;
4175             if (!link_util_add_program_resource(shProg, resource_set,
4176                                                 GL_TRANSFORM_FEEDBACK_BUFFER,
4177                                                 &linked_xfb->Buffers[i], 0))
4178                return;
4179          }
4180       }
4181    }
4182 
4183    int top_level_array_base_offset = -1;
4184    int top_level_array_size_in_bytes = -1;
4185    int second_element_offset = -1;
4186    int buffer_block_index = -1;
4187 
4188    /* Add uniforms from uniform storage. */
4189    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4190       /* Do not add uniforms internally used by Mesa. */
4191       if (shProg->data->UniformStorage[i].hidden)
4192          continue;
4193 
4194       bool is_shader_storage =
4195          shProg->data->UniformStorage[i].is_shader_storage;
4196       GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
4197       if (!link_util_should_add_buffer_variable(shProg,
4198                                                 &shProg->data->UniformStorage[i],
4199                                                 top_level_array_base_offset,
4200                                                 top_level_array_size_in_bytes,
4201                                                 second_element_offset,
4202                                                 buffer_block_index))
4203          continue;
4204 
4205       if (is_shader_storage) {
4206          /* From the OpenGL 4.6 specification, 7.3.1.1 Naming Active Resources:
4207           *
4208           *    "For an active shader storage block member declared as an array
4209           *    of an aggregate type, an entry will be generated only for the
4210           *    first array element, regardless of its type. Such block members
4211           *    are referred to as top-level arrays. If the block member is an
4212           *    aggregate type, the enumeration rules are then applied
4213           *    recursively."
4214           *
4215           * Below we update our tracking values used by
4216           * link_util_should_add_buffer_variable(). We only want to reset the
4217           * offsets once we have moved past the first element.
4218           */
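         /* Illustrative example: for a top-level member "S arr[4]" with a
          * 32-byte top-level array stride, the tracked size becomes
          * 4 * 32 = 128 bytes and second_element_offset marks the start of
          * arr[1]; the tracking is only reset again once an entry at or
          * beyond that offset is encountered.
          */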
4219          if (shProg->data->UniformStorage[i].offset >= second_element_offset) {
4220             top_level_array_base_offset =
4221                shProg->data->UniformStorage[i].offset;
4222 
4223             top_level_array_size_in_bytes =
4224                shProg->data->UniformStorage[i].top_level_array_size *
4225                shProg->data->UniformStorage[i].top_level_array_stride;
4226 
4227             /* Set or reset the second element offset. For non-arrays this
4228              * will be set to -1.
4229              */
4230             second_element_offset = top_level_array_size_in_bytes ?
4231                top_level_array_base_offset +
4232                shProg->data->UniformStorage[i].top_level_array_stride : -1;
4233          }
4234 
4235          buffer_block_index = shProg->data->UniformStorage[i].block_index;
4236       }
4237 
4238       uint8_t stageref = shProg->data->UniformStorage[i].active_shader_mask;
4239       if (!link_util_add_program_resource(shProg, resource_set, type,
4240                                           &shProg->data->UniformStorage[i], stageref))
4241          return;
4242    }
4243 
4244    /* Add program uniform blocks. */
4245    for (unsigned i = 0; i < shProg->data->NumUniformBlocks; i++) {
4246       if (!link_util_add_program_resource(shProg, resource_set, GL_UNIFORM_BLOCK,
4247                                           &shProg->data->UniformBlocks[i], 0))
4248          return;
4249    }
4250 
4251    /* Add program shader storage blocks. */
4252    for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
4253       if (!link_util_add_program_resource(shProg, resource_set, GL_SHADER_STORAGE_BLOCK,
4254                                           &shProg->data->ShaderStorageBlocks[i], 0))
4255          return;
4256    }
4257 
4258    /* Add atomic counter buffers. */
4259    for (unsigned i = 0; i < shProg->data->NumAtomicBuffers; i++) {
4260       if (!link_util_add_program_resource(shProg, resource_set, GL_ATOMIC_COUNTER_BUFFER,
4261                                           &shProg->data->AtomicBuffers[i], 0))
4262          return;
4263    }
4264 
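   /* Hidden uniforms were skipped above; those that are subroutine uniforms
    * are added here under the per-stage *_SUBROUTINE_UNIFORM interfaces
    * instead of GL_UNIFORM.
    */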
4265    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4266       GLenum type;
4267       if (!shProg->data->UniformStorage[i].hidden)
4268          continue;
4269 
4270       for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
4271          if (!shProg->data->UniformStorage[i].opaque[j].active ||
4272              !shProg->data->UniformStorage[i].type->is_subroutine())
4273             continue;
4274 
4275          type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
4276          /* add shader subroutines */
4277          if (!link_util_add_program_resource(shProg, resource_set,
4278                                              type, &shProg->data->UniformStorage[i], 0))
4279             return;
4280       }
4281    }
4282 
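   /* Add each linked stage's subroutine functions under the corresponding
    * per-stage *_SUBROUTINE interface.
    */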
4283    unsigned mask = shProg->data->linked_stages;
4284    while (mask) {
4285       const int i = u_bit_scan(&mask);
4286       struct gl_program *p = shProg->_LinkedShaders[i]->Program;
4287 
4288       GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
4289       for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4290          if (!link_util_add_program_resource(shProg, resource_set,
4291                                              type, &p->sh.SubroutineFunctions[j], 0))
4292             return;
4293       }
4294    }
4295 
4296    _mesa_set_destroy(resource_set, NULL);
4297 }
4298 
4299 /**
4300  * This check is done to make sure we allow only constant-expression
4301  * indexing and "constant-index-expression" indexing (indexing with an
4302  * expression that includes a loop induction variable).
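 *
 * As an illustration (not exhaustive), indexing a sampler array with the
 * induction variable of "for (int i = 0; i < 4; i++)" is a
 * constant-index-expression and is accepted, whereas indexing it with an
 * arbitrary non-constant expression triggers the warning or error below.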
4303  */
4304 static bool
4305 validate_sampler_array_indexing(const struct gl_constants *consts,
4306                                 struct gl_shader_program *prog)
4307 {
4308    dynamic_sampler_array_indexing_visitor v;
4309    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4310       if (prog->_LinkedShaders[i] == NULL)
4311          continue;
4312 
4313       bool no_dynamic_indexing =
4314          consts->ShaderCompilerOptions[i].EmitNoIndirectSampler;
4315 
4316       /* Search for array derefs in shader. */
4317       v.run(prog->_LinkedShaders[i]->ir);
4318       if (v.uses_dynamic_sampler_array_indexing()) {
4319          const char *msg = "sampler arrays indexed with non-constant "
4320                            "expressions are forbidden in GLSL %s %u";
4321          /* Backend has indicated that it has no dynamic indexing support. */
4322          if (no_dynamic_indexing) {
4323             linker_error(prog, msg, prog->IsES ? "ES" : "",
4324                          prog->data->Version);
4325             return false;
4326          } else {
4327             linker_warning(prog, msg, prog->IsES ? "ES" : "",
4328                            prog->data->Version);
4329          }
4330       }
4331    }
4332    return true;
4333 }
4334 
4335 static void
4336 link_assign_subroutine_types(struct gl_shader_program *prog)
4337 {
4338    unsigned mask = prog->data->linked_stages;
4339    while (mask) {
4340       const int i = u_bit_scan(&mask);
4341       gl_program *p = prog->_LinkedShaders[i]->Program;
4342 
4343       p->sh.MaxSubroutineFunctionIndex = 0;
4344       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
4345          ir_function *fn = node->as_function();
4346          if (!fn)
4347             continue;
4348 
4349          if (fn->is_subroutine)
4350             p->sh.NumSubroutineUniformTypes++;
4351 
4352          if (!fn->num_subroutine_types)
4353             continue;
4354 
4355          /* these should have been calculated earlier. */
4356          assert(fn->subroutine_index != -1);
4357          if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
4358             linker_error(prog, "Too many subroutine functions declared.\n");
4359             return;
4360          }
4361          p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
4362                                             struct gl_subroutine_function,
4363                                             p->sh.NumSubroutineFunctions + 1);
4364          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name.string = ralloc_strdup(p, fn->name);
4365          resource_name_updated(&p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name);
4366          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
4367          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
4368             ralloc_array(p, const struct glsl_type *,
4369                          fn->num_subroutine_types);
4370 
4371          /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
4372           * GLSL 4.5 spec:
4373           *
4374           *    "Each subroutine with an index qualifier in the shader must be
4375           *    given a unique index, otherwise a compile or link error will be
4376           *    generated."
4377           */
4378          for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4379             if (p->sh.SubroutineFunctions[j].index != -1 &&
4380                 p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
4381                linker_error(prog, "each subroutine index qualifier in the "
4382                             "shader must be unique\n");
4383                return;
4384             }
4385          }
4386          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
4387             fn->subroutine_index;
4388 
4389          if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
4390             p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
4391 
4392          for (int j = 0; j < fn->num_subroutine_types; j++)
4393             p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
4394          p->sh.NumSubroutineFunctions++;
4395       }
4396    }
4397 }
4398 
4399 static void
4400 verify_subroutine_associated_funcs(struct gl_shader_program *prog)
4401 {
4402    unsigned mask = prog->data->linked_stages;
4403    while (mask) {
4404       const int i = u_bit_scan(&mask);
4405       gl_program *p = prog->_LinkedShaders[i]->Program;
4406       glsl_symbol_table *symbols = prog->_LinkedShaders[i]->symbols;
4407 
4408       /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
4409        *
4410        *   "A program will fail to compile or link if any shader
4411        *    or stage contains two or more functions with the same
4412        *    name if the name is associated with a subroutine type."
4413        */
4414       for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4415          unsigned definitions = 0;
4416          char *name = p->sh.SubroutineFunctions[j].name.string;
4417          ir_function *fn = symbols->get_function(name);
4418 
4419          /* Calculate number of function definitions with the same name */
4420          foreach_in_list(ir_function_signature, sig, &fn->signatures) {
4421             if (sig->is_defined) {
4422                if (++definitions > 1) {
4423                   linker_error(prog, "%s shader contains two or more function "
4424                                "definitions with name `%s', which is "
4425                                "associated with a subroutine type.\n",
4426                                _mesa_shader_stage_to_string(i),
4427                                fn->name);
4428                   return;
4429                }
4430             }
4431          }
4432       }
4433    }
4434 }
4435 
4436 
4437 static void
4438 set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
4439 {
4440    assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
4441 
4442    foreach_in_list(ir_instruction, node, ir) {
4443       ir_variable *const var = node->as_variable();
4444 
4445       if (var == NULL || var->data.mode != io_mode)
4446          continue;
4447 
4448       /* Don't set always active on builtins that haven't been redeclared */
4449       if (var->data.how_declared == ir_var_declared_implicitly)
4450          continue;
4451 
4452       var->data.always_active_io = true;
4453    }
4454 }
4455 
4456 /**
4457  * When separate shader programs are enabled, only input/outputs between
4458  * the stages of a multi-stage separate program can be safely removed
4459  * from the shader interface. Other inputs/outputs must remain active.
4460  */
4461 static void
4462 disable_varying_optimizations_for_sso(struct gl_shader_program *prog)
4463 {
4464    unsigned first, last;
4465    assert(prog->SeparateShader);
4466 
4467    first = MESA_SHADER_STAGES;
4468    last = 0;
4469 
4470    /* Determine the first and last stage, excluding the compute stage. */
4471    for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
4472       if (!prog->_LinkedShaders[i])
4473          continue;
4474       if (first == MESA_SHADER_STAGES)
4475          first = i;
4476       last = i;
4477    }
4478 
4479    if (first == MESA_SHADER_STAGES)
4480       return;
4481 
4482    for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4483       gl_linked_shader *sh = prog->_LinkedShaders[stage];
4484       if (!sh)
4485          continue;
4486 
4487       /* Prevent the removal of inputs to the first and outputs from the last
4488        * stage, unless they are the initial pipeline inputs or final pipeline
4489        * outputs, respectively.
4490        *
4491        * The removal of IO between shaders in the same program is always
4492        * allowed.
4493        */
4494       if (stage == first && stage != MESA_SHADER_VERTEX)
4495          set_always_active_io(sh->ir, ir_var_shader_in);
4496       if (stage == last && stage != MESA_SHADER_FRAGMENT)
4497          set_always_active_io(sh->ir, ir_var_shader_out);
4498    }
4499 }
4500 
4501 static void
4502 link_and_validate_uniforms(const struct gl_constants *consts,
4503                            const struct gl_extensions *exts,
4504                            struct gl_shader_program *prog)
4505 {
4506    assert(!consts->UseNIRGLSLLinker);
4507 
4508    update_array_sizes(prog);
4509    link_assign_uniform_locations(prog, consts);
4510 
4511    if (prog->data->LinkStatus == LINKING_FAILURE)
4512       return;
4513 
4514    link_util_calculate_subroutine_compat(prog);
4515    link_util_check_uniform_resources(consts, prog);
4516    link_util_check_subroutine_resources(prog);
4517    check_image_resources(consts, exts, prog);
4518    link_assign_atomic_counter_resources(consts, prog);
4519    link_check_atomic_counter_resources(consts, prog);
4520 }
4521 
4522 static bool
4523 link_varyings_and_uniforms(unsigned first, unsigned last,
4524                            const struct gl_constants *consts,
4525                            const struct gl_extensions *exts,
4526                            gl_api api,
4527                            struct gl_shader_program *prog, void *mem_ctx)
4528 {
4529    /* Mark all generic shader inputs and outputs as unpaired. */
4530    for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
4531       if (prog->_LinkedShaders[i] != NULL) {
4532          link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
4533       }
4534    }
4535 
4536    unsigned prev = first;
4537    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4538       if (prog->_LinkedShaders[i] == NULL)
4539          continue;
4540 
4541       match_explicit_outputs_to_inputs(prog->_LinkedShaders[prev],
4542                                        prog->_LinkedShaders[i]);
4543       prev = i;
4544    }
4545 
4546    if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
4547                                             MESA_SHADER_VERTEX, true)) {
4548       return false;
4549    }
4550 
4551    if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
4552                                             MESA_SHADER_FRAGMENT, true)) {
4553       return false;
4554    }
4555 
4556    prog->last_vert_prog = NULL;
4557    for (int i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
4558       if (prog->_LinkedShaders[i] == NULL)
4559          continue;
4560 
4561       prog->last_vert_prog = prog->_LinkedShaders[i]->Program;
4562       break;
4563    }
4564 
4565    if (!link_varyings(prog, first, last, consts, exts,
4566                       api, mem_ctx))
4567       return false;
4568 
4569    if (!consts->UseNIRGLSLLinker)
4570       link_and_validate_uniforms(consts, exts, prog);
4571 
4572    if (!prog->data->LinkStatus)
4573       return false;
4574 
4575    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4576       if (prog->_LinkedShaders[i] == NULL)
4577          continue;
4578 
4579       const struct gl_shader_compiler_options *options =
4580          &consts->ShaderCompilerOptions[i];
4581 
4582       if (options->LowerBufferInterfaceBlocks)
4583          lower_ubo_reference(prog->_LinkedShaders[i],
4584                              options->ClampBlockIndicesToArrayBounds,
4585                              consts->UseSTD430AsDefaultPacking);
4586 
4587       if (i == MESA_SHADER_COMPUTE)
4588          lower_shared_reference(consts, prog, prog->_LinkedShaders[i]);
4589 
4590       lower_vector_derefs(prog->_LinkedShaders[i]);
4591       do_vec_index_to_swizzle(prog->_LinkedShaders[i]->ir);
4592    }
4593 
4594    return true;
4595 }
4596 
4597 static void
4598 linker_optimisation_loop(const struct gl_constants *consts, exec_list *ir,
4599                          unsigned stage)
4600 {
4601       if (consts->GLSLOptimizeConservatively) {
4602          /* Run it just once. */
4603          do_common_optimization(ir, true, false,
4604                                 &consts->ShaderCompilerOptions[stage],
4605                                 consts->NativeIntegers);
4606       } else {
4607          /* Repeat it until it stops making changes. */
4608          while (do_common_optimization(ir, true, false,
4609                                        &consts->ShaderCompilerOptions[stage],
4610                                        consts->NativeIntegers))
4611             ;
4612       }
4613 }
4614 
4615 void
4616 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
4617 {
4618    const struct gl_constants *consts = &ctx->Const;
4619    prog->data->LinkStatus = LINKING_SUCCESS; /* All error paths will set this to false */
4620    prog->data->Validated = false;
4621 
4622    /* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
4623     *
4624     *     "Linking can fail for a variety of reasons as specified in the
4625     *     OpenGL Shading Language Specification, as well as any of the
4626     *     following reasons:
4627     *
4628     *     - No shader objects are attached to program."
4629     *
4630     * The Compatibility Profile specification does not list the error.  In
4631     * Compatibility Profile missing shader stages are replaced by
4632     * fixed-function.  This applies to the case where all stages are
4633     * missing.
4634     */
4635    if (prog->NumShaders == 0) {
4636       if (ctx->API != API_OPENGL_COMPAT)
4637          linker_error(prog, "no shaders attached to the program\n");
4638       return;
4639    }
4640 
4641 #ifdef ENABLE_SHADER_CACHE
4642    if (shader_cache_read_program_metadata(ctx, prog))
4643       return;
4644 #endif
4645 
4646    void *mem_ctx = ralloc_context(NULL); // temporary linker context
4647 
4648    prog->ARB_fragment_coord_conventions_enable = false;
4649 
4650    /* Separate the shaders into groups based on their type.
4651     */
4652    struct gl_shader **shader_list[MESA_SHADER_STAGES];
4653    unsigned num_shaders[MESA_SHADER_STAGES];
4654 
4655    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
4656       shader_list[i] = (struct gl_shader **)
4657          calloc(prog->NumShaders, sizeof(struct gl_shader *));
4658       num_shaders[i] = 0;
4659    }
4660 
4661    unsigned min_version = UINT_MAX;
4662    unsigned max_version = 0;
4663    for (unsigned i = 0; i < prog->NumShaders; i++) {
4664       min_version = MIN2(min_version, prog->Shaders[i]->Version);
4665       max_version = MAX2(max_version, prog->Shaders[i]->Version);
4666 
4667       if (!consts->AllowGLSLRelaxedES &&
4668           prog->Shaders[i]->IsES != prog->Shaders[0]->IsES) {
4669          linker_error(prog, "all shaders must use same shading "
4670                       "language version\n");
4671          goto done;
4672       }
4673 
4674       if (prog->Shaders[i]->ARB_fragment_coord_conventions_enable) {
4675          prog->ARB_fragment_coord_conventions_enable = true;
4676       }
4677 
4678       gl_shader_stage shader_type = prog->Shaders[i]->Stage;
4679       shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
4680       num_shaders[shader_type]++;
4681    }
4682 
4683    /* In desktop GLSL, different shader versions may be linked together.  In
4684     * GLSL ES, all shader versions must be the same.
4685     */
4686    if (!consts->AllowGLSLRelaxedES && prog->Shaders[0]->IsES &&
4687        min_version != max_version) {
4688       linker_error(prog, "all shaders must use same shading "
4689                    "language version\n");
4690       goto done;
4691    }
4692 
4693    prog->data->Version = max_version;
4694    prog->IsES = prog->Shaders[0]->IsES;
4695 
4696    /* Some shaders have to be linked with some other shaders present.
4697     */
4698    if (!prog->SeparateShader) {
4699       if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
4700           num_shaders[MESA_SHADER_VERTEX] == 0) {
4701          linker_error(prog, "Geometry shader must be linked with "
4702                       "vertex shader\n");
4703          goto done;
4704       }
4705       if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
4706           num_shaders[MESA_SHADER_VERTEX] == 0) {
4707          linker_error(prog, "Tessellation evaluation shader must be linked "
4708                       "with vertex shader\n");
4709          goto done;
4710       }
4711       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4712           num_shaders[MESA_SHADER_VERTEX] == 0) {
4713          linker_error(prog, "Tessellation control shader must be linked with "
4714                       "vertex shader\n");
4715          goto done;
4716       }
4717 
4718       /* Section 7.3 of the OpenGL ES 3.2 specification says:
4719        *
4720        *    "Linking can fail for [...] any of the following reasons:
4721        *
4722        *     * program contains an object to form a tessellation control
4723        *       shader [...] and [...] the program is not separable and
4724        *       contains no object to form a tessellation evaluation shader"
4725        *
4726        * The OpenGL spec is contradictory. It allows linking without a tess
4727        * eval shader, but that can only be used with transform feedback and
4728        * rasterization disabled. However, transform feedback isn't allowed
4729        * with GL_PATCHES, so it can't be used.
4730        *
4731        * More investigation showed that the idea of transform feedback after
4732        * a tess control shader was dropped, because some hw vendors couldn't
4733        * support tessellation without a tess eval shader, but the linker
4734        * section wasn't updated to reflect that.
4735        *
4736        * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
4737        * spec bug.
4738        *
4739        * Do what's reasonable and always require a tess eval shader if a tess
4740        * control shader is present.
4741        */
4742       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4743           num_shaders[MESA_SHADER_TESS_EVAL] == 0) {
4744          linker_error(prog, "Tessellation control shader must be linked with "
4745                       "tessellation evaluation shader\n");
4746          goto done;
4747       }
4748 
4749       if (prog->IsES) {
4750          if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
4751              num_shaders[MESA_SHADER_TESS_CTRL] == 0) {
4752             linker_error(prog, "GLSL ES requires non-separable programs "
4753                          "containing a tessellation evaluation shader to also "
4754                          "be linked with a tessellation control shader\n");
4755             goto done;
4756          }
4757       }
4758    }
4759 
4760    /* Compute shaders have additional restrictions. */
4761    if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
4762        num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
4763       linker_error(prog, "Compute shaders may not be linked with any other "
4764                    "type of shader\n");
4765    }
4766 
4767    /* Link all shaders for a particular stage and validate the result.
4768     */
4769    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4770       if (num_shaders[stage] > 0) {
4771          gl_linked_shader *const sh =
4772             link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
4773                                     num_shaders[stage], false);
4774 
4775          if (!prog->data->LinkStatus) {
4776             if (sh)
4777                _mesa_delete_linked_shader(ctx, sh);
4778             goto done;
4779          }
4780 
4781          switch (stage) {
4782          case MESA_SHADER_VERTEX:
4783             validate_vertex_shader_executable(prog, sh, consts);
4784             break;
4785          case MESA_SHADER_TESS_CTRL:
4786             /* nothing to be done */
4787             break;
4788          case MESA_SHADER_TESS_EVAL:
4789             validate_tess_eval_shader_executable(prog, sh, consts);
4790             break;
4791          case MESA_SHADER_GEOMETRY:
4792             validate_geometry_shader_executable(prog, sh, consts);
4793             break;
4794          case MESA_SHADER_FRAGMENT:
4795             validate_fragment_shader_executable(prog, sh);
4796             break;
4797          }
4798          if (!prog->data->LinkStatus) {
4799             if (sh)
4800                _mesa_delete_linked_shader(ctx, sh);
4801             goto done;
4802          }
4803 
4804          prog->_LinkedShaders[stage] = sh;
4805          prog->data->linked_stages |= 1 << stage;
4806       }
4807    }
4808 
4809    /* Here begins the inter-stage linking phase.  Some initial validation is
4810     * performed, then locations are assigned for uniforms, attributes, and
4811     * varyings.
4812     */
4813    cross_validate_uniforms(consts, prog);
4814    if (!prog->data->LinkStatus)
4815       goto done;
4816 
4817    unsigned first, last, prev;
4818 
4819    first = MESA_SHADER_STAGES;
4820    last = 0;
4821 
4822    /* Determine first and last stage. */
4823    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4824       if (!prog->_LinkedShaders[i])
4825          continue;
4826       if (first == MESA_SHADER_STAGES)
4827          first = i;
4828       last = i;
4829    }
4830 
4831    check_explicit_uniform_locations(&ctx->Extensions, prog);
4832    link_assign_subroutine_types(prog);
4833    verify_subroutine_associated_funcs(prog);
4834 
4835    if (!prog->data->LinkStatus)
4836       goto done;
4837 
4838    resize_tes_inputs(consts, prog);
4839 
4840    /* Validate the inputs of each stage with the output of the preceding
4841     * stage.
4842     */
4843    prev = first;
4844    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4845       if (prog->_LinkedShaders[i] == NULL)
4846          continue;
4847 
4848       validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
4849                                        prog->_LinkedShaders[i]);
4850       if (!prog->data->LinkStatus)
4851          goto done;
4852 
4853       cross_validate_outputs_to_inputs(consts, prog,
4854                                        prog->_LinkedShaders[prev],
4855                                        prog->_LinkedShaders[i]);
4856       if (!prog->data->LinkStatus)
4857          goto done;
4858 
4859       prev = i;
4860    }
4861 
4862    /* The cross validation of outputs/inputs above validates interstage
4863     * explicit locations. We need to do this also for the inputs in the first
4864     * stage and outputs of the last stage included in the program, since there
4865     * is no cross validation for these.
4866     */
4867    validate_first_and_last_interface_explicit_locations(consts, prog,
4868                                                         (gl_shader_stage) first,
4869                                                         (gl_shader_stage) last);
4870 
4871    /* Cross-validate uniform blocks between shader stages */
4872    validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
4873    if (!prog->data->LinkStatus)
4874       goto done;
4875 
4876    for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
4877       if (prog->_LinkedShaders[i] != NULL)
4878          lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
4879    }
4880 
4881    if (prog->IsES && prog->data->Version == 100)
4882       if (!validate_invariant_builtins(prog,
4883             prog->_LinkedShaders[MESA_SHADER_VERTEX],
4884             prog->_LinkedShaders[MESA_SHADER_FRAGMENT]))
4885          goto done;
4886 
4887    /* Implement the GLSL 1.30+ rule for discard vs infinite loops.  Do
4888     * it before optimization because we want most of the checks to get
4889     * dropped thanks to constant propagation.
4890     *
4891     * This rule also applies to GLSL ES 3.00.
4892     */
4893    if (max_version >= (prog->IsES ? 300 : 130)) {
4894       struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
4895       if (sh) {
4896          lower_discard_flow(sh->ir);
4897       }
4898    }
4899 
4900    if (prog->SeparateShader)
4901       disable_varying_optimizations_for_sso(prog);
4902 
4903    /* Process UBOs */
4904    if (!interstage_cross_validate_uniform_blocks(prog, false))
4905       goto done;
4906 
4907    /* Process SSBOs */
4908    if (!interstage_cross_validate_uniform_blocks(prog, true))
4909       goto done;
4910 
4911    /* Do common optimization before assigning storage for attributes,
4912     * uniforms, and varyings.  Later optimization could possibly make
4913     * some of that unused.
4914     */
4915    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4916       if (prog->_LinkedShaders[i] == NULL)
4917          continue;
4918 
4919       detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
4920       if (!prog->data->LinkStatus)
4921          goto done;
4922 
4923       if (consts->ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
4924          lower_clip_cull_distance(prog, prog->_LinkedShaders[i]);
4925       }
4926 
4927       if (consts->LowerTessLevel) {
4928          lower_tess_level(prog->_LinkedShaders[i]);
4929       }
4930 
4931       /* Section 13.46 (Vertex Attribute Aliasing) of the OpenGL ES 3.2
4932        * specification says:
4933        *
4934        *    "In general, the behavior of GLSL ES should not depend on compiler
4935        *    optimizations which might be implementation-dependent. Name matching
4936        *    rules in most languages, including C++ from which GLSL ES is derived,
4937        *    are based on declarations rather than use.
4938        *
4939        *    RESOLUTION: The existence of aliasing is determined by declarations
4940        *    present after preprocessing."
4941        *
4942        * Because of this rule, we do a 'dry-run' of attribute assignment for
4943        * vertex shader inputs here.
4944        */
4945       if (prog->IsES && i == MESA_SHADER_VERTEX) {
4946          if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
4947                                                   MESA_SHADER_VERTEX, false)) {
4948             goto done;
4949          }
4950       }
4951 
4952       /* Call opts before lowering const arrays to uniforms so we can const
4953        * propagate any elements accessed directly.
4954        */
4955       linker_optimisation_loop(consts, prog->_LinkedShaders[i]->ir, i);
4956 
4957       /* Call opts again after lowering const arrays to propagate the copies. */
4958       if (consts->GLSLLowerConstArrays &&
4959           lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir, i,
4960                                          consts->Program[i].MaxUniformComponents))
4961          linker_optimisation_loop(consts, prog->_LinkedShaders[i]->ir, i);
4962 
4963    }
4964 
4965    /* Validate the special cases where we allow sampler array indexing
4966     * with a loop induction variable. This check emits a warning or an
4967     * error depending on whether the backend can handle dynamic indexing.
4968     */
4969    if ((!prog->IsES && prog->data->Version < 130) ||
4970        (prog->IsES && prog->data->Version < 300)) {
4971       if (!validate_sampler_array_indexing(consts, prog))
4972          goto done;
4973    }
4974 
4975    /* Check and validate stream emissions in geometry shaders */
4976    validate_geometry_shader_emissions(consts, prog);
4977 
4978    store_fragdepth_layout(prog);
4979 
4980    if (!link_varyings_and_uniforms(first, last, consts,
4981                                    &ctx->Extensions, ctx->API, prog, mem_ctx))
4982       goto done;
4983 
4984    /* Linking varyings can cause some extra, useless swizzles to be generated
4985     * due to packing and unpacking.
4986     */
4987    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4988       if (prog->_LinkedShaders[i] == NULL)
4989          continue;
4990 
4991       optimize_swizzles(prog->_LinkedShaders[i]->ir);
4992    }
4993 
4994    /* OpenGL ES < 3.1 requires that a vertex shader and a fragment shader both
4995     * be present in a linked program. GL_ARB_ES2_compatibility doesn't say
4996     * anything about shader linking when one of the shaders (vertex or
4997     * fragment shader) is absent. So, the extension shouldn't change the
4998     * behavior specified in GLSL specification.
4999     *
5000     * From OpenGL ES 3.1 specification (7.3 Program Objects):
5001     *     "Linking can fail for a variety of reasons as specified in the
5002     *     OpenGL ES Shading Language Specification, as well as any of the
5003     *     following reasons:
5004     *
5005     *     ...
5006     *
5007     *     * program contains objects to form either a vertex shader or
5008     *       fragment shader, and program is not separable, and does not
5009     *       contain objects to form both a vertex shader and fragment
5010     *       shader."
5011     *
5012     * However, the only scenario in 3.1+ where we don't require them both is
5013     * when we have a compute shader. For example:
5014     *
5015     * - No shaders is a link error.
5016     * - Geom or Tess without a Vertex shader is a link error which means we
5017     *   always require a Vertex shader and hence a Fragment shader.
5018     * - Finally a Compute shader linked with any other stage is a link error.
5019     */
5020    if (!prog->SeparateShader && ctx->API == API_OPENGLES2 &&
5021        num_shaders[MESA_SHADER_COMPUTE] == 0) {
5022       if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
5023          linker_error(prog, "program lacks a vertex shader\n");
5024       } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
5025          linker_error(prog, "program lacks a fragment shader\n");
5026       }
5027    }
5028 
5029 done:
5030    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
5031       free(shader_list[i]);
5032       if (prog->_LinkedShaders[i] == NULL)
5033          continue;
5034 
5035       /* Do a final validation step to make sure that the IR wasn't
5036        * invalidated by any modifications performed after intrastage linking.
5037        */
5038       validate_ir_tree(prog->_LinkedShaders[i]->ir);
5039 
5040       /* Retain any live IR, but trash the rest. */
5041       reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
5042 
5043       /* The symbol table in the linked shaders may contain references to
5044        * variables that were removed (e.g., unused uniforms).  Since it may
5045        * contain junk, there is no possible valid use.  Delete it and set the
5046        * pointer to NULL.
5047        */
5048       delete prog->_LinkedShaders[i]->symbols;
5049       prog->_LinkedShaders[i]->symbols = NULL;
5050    }
5051 
5052    ralloc_free(mem_ctx);
5053 }
5054 
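/**
 * Refresh the information cached alongside a resource name: its length,
 * the position of the last '[' character, and whether the name ends in
 * the "[0]" suffix used for arrays.
 */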
5055 void
5056 resource_name_updated(struct gl_resource_name *name)
5057 {
5058    if (name->string) {
5059       name->length = strlen(name->string);
5060 
5061       const char *last_square_bracket = strrchr(name->string, '[');
5062       if (last_square_bracket) {
5063          name->last_square_bracket = last_square_bracket - name->string;
5064          name->suffix_is_zero_square_bracketed =
5065             strcmp(last_square_bracket, "[0]") == 0;
5066       } else {
5067          name->last_square_bracket = -1;
5068          name->suffix_is_zero_square_bracketed = false;
5069       }
5070    } else {
5071       name->length = 0;
5072       name->last_square_bracket = -1;
5073       name->suffix_is_zero_square_bracketed = false;
5074    }
5075 }
5076