/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file linker.cpp
 * GLSL linker implementation
 *
 * Given a set of shaders that are to be linked to generate a final program,
 * there are three distinct stages.
 *
 * In the first stage shaders are partitioned into groups based on the shader
 * type.  All shaders of a particular type (e.g., vertex shaders) are linked
 * together.
 *
 *   - Undefined references in each shader are resolved to definitions in
 *     another shader.
 *   - Types and qualifiers of uniforms, outputs, and global variables defined
 *     in multiple shaders with the same name are verified to be the same.
 *   - Initializers for uniforms and global variables defined
 *     in multiple shaders with the same name are verified to be the same.
 *
 * The result, in the terminology of the GLSL spec, is a set of shader
 * executables for each processing unit.
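 *
 * As a purely illustrative sketch (not code found in this file), consider
 * two vertex shader compilation units:
 *
 *     // unit A
 *     float scale(float x);
 *     void main() { gl_Position = vec4(scale(1.0)); }
 *
 *     // unit B
 *     float scale(float x) { return x * 2.0; }
 *
 * The first stage combines them into a single vertex shader executable in
 * which the call to scale() in unit A is resolved to the definition in
 * unit B.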
 *
 * After the first stage is complete, a series of semantic checks is performed
 * on each of the shader executables.
 *
 *   - Each shader executable must define a \c main function.
 *   - Each vertex shader executable must write to \c gl_Position.
 *   - Each fragment shader executable must write to either \c gl_FragData or
 *     \c gl_FragColor.
 *
 * In the final stage individual shader executables are linked to create a
 * complete executable.
 *
 *   - Types of uniforms defined in multiple shader stages with the same name
 *     are verified to be the same.
 *   - Initializers for uniforms defined in multiple shader stages with the
 *     same name are verified to be the same.
 *   - Types and qualifiers of outputs defined in one stage are verified to
 *     be the same as the types and qualifiers of inputs defined with the same
 *     name in a later stage.
 *
 * \author Ian Romanick <ian.d.romanick@intel.com>
 */

#include <ctype.h>
#include "util/strndup.h"
#include "glsl_symbol_table.h"
#include "glsl_parser_extras.h"
#include "ir.h"
#include "program.h"
#include "program/prog_instruction.h"
#include "program/program.h"
#include "util/mesa-sha1.h"
#include "util/set.h"
#include "string_to_uint_map.h"
#include "linker.h"
#include "linker_util.h"
#include "link_varyings.h"
#include "ir_optimization.h"
#include "ir_rvalue_visitor.h"
#include "ir_uniform.h"
#include "builtin_functions.h"
#include "shader_cache.h"
#include "util/u_string.h"
#include "util/u_math.h"


#include "main/shaderobj.h"
#include "main/enums.h"
#include "main/mtypes.h"


namespace {

struct find_variable {
   const char *name;
   bool found;

   find_variable(const char *name) : name(name), found(false) {}
};

/**
 * Visitor that determines whether or not a variable is ever written.
 *
 * Use \ref find_assignments for convenience.
 */
class find_assignment_visitor : public ir_hierarchical_visitor {
public:
   find_assignment_visitor(unsigned num_vars,
                           find_variable * const *vars)
      : num_variables(num_vars), num_found(0), variables(vars)
   {
   }

   virtual ir_visitor_status visit_enter(ir_assignment *ir)
   {
      ir_variable *const var = ir->lhs->variable_referenced();

      return check_variable_name(var->name);
   }

   virtual ir_visitor_status visit_enter(ir_call *ir)
   {
      foreach_two_lists(formal_node, &ir->callee->parameters,
                        actual_node, &ir->actual_parameters) {
         ir_rvalue *param_rval = (ir_rvalue *) actual_node;
         ir_variable *sig_param = (ir_variable *) formal_node;

         if (sig_param->data.mode == ir_var_function_out ||
             sig_param->data.mode == ir_var_function_inout) {
            ir_variable *var = param_rval->variable_referenced();
            if (var && check_variable_name(var->name) == visit_stop)
               return visit_stop;
         }
      }

      if (ir->return_deref != NULL) {
         ir_variable *const var = ir->return_deref->variable_referenced();

         if (check_variable_name(var->name) == visit_stop)
            return visit_stop;
      }

      return visit_continue_with_parent;
   }

private:
   ir_visitor_status check_variable_name(const char *name)
   {
      for (unsigned i = 0; i < num_variables; ++i) {
         if (strcmp(variables[i]->name, name) == 0) {
            if (!variables[i]->found) {
               variables[i]->found = true;

               assert(num_found < num_variables);
               if (++num_found == num_variables)
                  return visit_stop;
            }
            break;
         }
      }

      return visit_continue_with_parent;
   }

private:
   unsigned num_variables;           /**< Number of variables to find */
   unsigned num_found;               /**< Number of variables already found */
   find_variable * const *variables; /**< Variables to find */
};

/**
 * Determine whether or not any of a NULL-terminated list of variables is ever
 * written to.
 */
static void
find_assignments(exec_list *ir, find_variable * const *vars)
{
   unsigned num_variables = 0;

   for (find_variable * const *v = vars; *v; ++v)
      num_variables++;

   find_assignment_visitor visitor(num_variables, vars);
   visitor.run(ir);
}
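
/* Typical usage of the helpers above (an illustrative sketch that mirrors
 * how this file calls them further below):
 *
 *    find_variable gl_Position("gl_Position");
 *    find_variable * const vars[] = { &gl_Position, NULL };
 *    find_assignments(shader->ir, vars);
 *    if (gl_Position.found) {
 *       // the shader statically writes gl_Position
 *    }
 */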

/**
 * Determine whether or not the given variable is ever written to.
 */
static void
find_assignments(exec_list *ir, find_variable *var)
{
   find_assignment_visitor visitor(1, &var);
   visitor.run(ir);
}

/**
 * Visitor that determines whether or not a variable is ever read.
 */
class find_deref_visitor : public ir_hierarchical_visitor {
public:
   find_deref_visitor(const char *name)
      : name(name), found(false)
   {
      /* empty */
   }

   virtual ir_visitor_status visit(ir_dereference_variable *ir)
   {
      if (strcmp(this->name, ir->var->name) == 0) {
         this->found = true;
         return visit_stop;
      }

      return visit_continue;
   }

   bool variable_found() const
   {
      return this->found;
   }

private:
   const char *name;       /**< Find reads of a variable with this name. */
   bool found;             /**< Was a read of the variable found? */
};


/**
 * A visitor helper that provides methods for updating the types of
 * ir_dereferences.  Classes that update variable types (say, updating
 * array sizes) will want to use this so that dereference types stay in sync.
 */
class deref_type_updater : public ir_hierarchical_visitor {
public:
   virtual ir_visitor_status visit(ir_dereference_variable *ir)
   {
      ir->type = ir->var->type;
      return visit_continue;
   }

   virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
   {
      const glsl_type *const vt = ir->array->type;
      if (vt->is_array())
         ir->type = vt->fields.array;
      return visit_continue;
   }

   virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
   {
      ir->type = ir->record->type->fields.structure[ir->field_idx].type;
      return visit_continue;
   }
};


class array_resize_visitor : public deref_type_updater {
public:
   using deref_type_updater::visit;

   unsigned num_vertices;
   gl_shader_program *prog;
   gl_shader_stage stage;

   array_resize_visitor(unsigned num_vertices,
                        gl_shader_program *prog,
                        gl_shader_stage stage)
   {
      this->num_vertices = num_vertices;
      this->prog = prog;
      this->stage = stage;
   }

   virtual ~array_resize_visitor()
   {
      /* empty */
   }

   virtual ir_visitor_status visit(ir_variable *var)
   {
      if (!var->type->is_array() || var->data.mode != ir_var_shader_in ||
          var->data.patch)
         return visit_continue;

      unsigned size = var->type->length;

      if (stage == MESA_SHADER_GEOMETRY) {
         /* Generate a link error if the shader has declared this array with
          * an incorrect size.
          */
         if (!var->data.implicit_sized_array &&
             size && size != this->num_vertices) {
            linker_error(this->prog, "size of array %s declared as %u, "
                         "but number of input vertices is %u\n",
                         var->name, size, this->num_vertices);
            return visit_continue;
         }

         /* Generate a link error if the shader attempts to access an input
          * array using an index too large for its actual size assigned at
          * link time.
          */
         if (var->data.max_array_access >= (int)this->num_vertices) {
            linker_error(this->prog, "%s shader accesses element %i of "
                         "%s, but only %i input vertices\n",
                         _mesa_shader_stage_to_string(this->stage),
                         var->data.max_array_access, var->name, this->num_vertices);
            return visit_continue;
         }
      }

      var->type = glsl_type::get_array_instance(var->type->fields.array,
                                                this->num_vertices);
      var->data.max_array_access = this->num_vertices - 1;

      return visit_continue;
   }
};

class array_length_to_const_visitor : public ir_rvalue_visitor {
public:
   array_length_to_const_visitor()
   {
      this->progress = false;
   }

   virtual ~array_length_to_const_visitor()
   {
      /* empty */
   }

   bool progress;

   virtual void handle_rvalue(ir_rvalue **rvalue)
   {
      if (*rvalue == NULL || (*rvalue)->ir_type != ir_type_expression)
         return;

      ir_expression *expr = (*rvalue)->as_expression();
      if (expr) {
         if (expr->operation == ir_unop_implicitly_sized_array_length) {
            assert(!expr->operands[0]->type->is_unsized_array());
            ir_constant *constant = new(expr)
               ir_constant(expr->operands[0]->type->array_size());
            if (constant) {
               *rvalue = constant;
            }
         }
      }
   }
};

/**
 * Visitor that determines the highest stream id to which a (geometry) shader
 * emits vertices. It also checks whether End{Stream}Primitive is ever called.
 */
class find_emit_vertex_visitor : public ir_hierarchical_visitor {
public:
   find_emit_vertex_visitor(int max_allowed)
      : max_stream_allowed(max_allowed),
        invalid_stream_id(0),
        invalid_stream_id_from_emit_vertex(false),
        end_primitive_found(false),
        used_streams(0)
   {
      /* empty */
   }

   virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
   {
      int stream_id = ir->stream_id();

      if (stream_id < 0) {
         invalid_stream_id = stream_id;
         invalid_stream_id_from_emit_vertex = true;
         return visit_stop;
      }

      if (stream_id > max_stream_allowed) {
         invalid_stream_id = stream_id;
         invalid_stream_id_from_emit_vertex = true;
         return visit_stop;
      }

      used_streams |= 1 << stream_id;

      return visit_continue;
   }

   virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
   {
      end_primitive_found = true;

      int stream_id = ir->stream_id();

      if (stream_id < 0) {
         invalid_stream_id = stream_id;
         invalid_stream_id_from_emit_vertex = false;
         return visit_stop;
      }

      if (stream_id > max_stream_allowed) {
         invalid_stream_id = stream_id;
         invalid_stream_id_from_emit_vertex = false;
         return visit_stop;
      }

      used_streams |= 1 << stream_id;

      return visit_continue;
   }

   bool error()
   {
      return invalid_stream_id != 0;
   }

   const char *error_func()
   {
      return invalid_stream_id_from_emit_vertex ?
         "EmitStreamVertex" : "EndStreamPrimitive";
   }

   int error_stream()
   {
      return invalid_stream_id;
   }

   unsigned active_stream_mask()
   {
      return used_streams;
   }

   bool uses_end_primitive()
   {
      return end_primitive_found;
   }

private:
   int max_stream_allowed;
   int invalid_stream_id;
   bool invalid_stream_id_from_emit_vertex;
   bool end_primitive_found;
   unsigned used_streams;
};

/* Class that finds array derefs and checks if indexes are dynamic. */
class dynamic_sampler_array_indexing_visitor : public ir_hierarchical_visitor
{
public:
   dynamic_sampler_array_indexing_visitor() :
      dynamic_sampler_array_indexing(false)
   {
   }

   ir_visitor_status visit_enter(ir_dereference_array *ir)
   {
      if (!ir->variable_referenced())
         return visit_continue;

      if (!ir->variable_referenced()->type->contains_sampler())
         return visit_continue;

      if (!ir->array_index->constant_expression_value(ralloc_parent(ir))) {
         dynamic_sampler_array_indexing = true;
         return visit_stop;
      }
      return visit_continue;
   }

   bool uses_dynamic_sampler_array_indexing()
   {
      return dynamic_sampler_array_indexing;
   }

private:
   bool dynamic_sampler_array_indexing;
};
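
/* For illustration only (GLSL, not code in this file), the visitor above
 * flags dynamic indexing such as
 *
 *    uniform sampler2D tex[4];
 *    uniform int i;
 *    ... texture(tex[i], uv) ...      // non-constant index
 *
 * while an access like texture(tex[2], uv) uses a constant index and is not
 * flagged.
 */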

} /* anonymous namespace */

void
linker_error(gl_shader_program *prog, const char *fmt, ...)
{
   va_list ap;

   ralloc_strcat(&prog->data->InfoLog, "error: ");
   va_start(ap, fmt);
   ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
   va_end(ap);

   prog->data->LinkStatus = LINKING_FAILURE;
}


void
linker_warning(gl_shader_program *prog, const char *fmt, ...)
{
   va_list ap;

   ralloc_strcat(&prog->data->InfoLog, "warning: ");
   va_start(ap, fmt);
   ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
   va_end(ap);

}


/**
 * Given a string identifying a program resource, break it into a base name
 * and an optional array index in square brackets.
 *
 * If an array index is present, \c out_base_name_end is set to point to the
 * "[" that precedes the array index, and the array index itself is returned
 * as a long.
 *
 * If no array index is present (or if the array index is negative or
 * mal-formed), \c out_base_name_end is set to point to the null terminator
 * at the end of the input string, and -1 is returned.
 *
 * Only the final array index is parsed; if the string contains other array
 * indices (or structure field accesses), they are left in the base name.
 *
 * No attempt is made to check that the base name is properly formed;
 * typically the caller will look up the base name in a hash table, so
 * ill-formed base names simply turn into hash table lookup failures.
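 *
 * For example (illustrative values, not from a real caller): parsing
 * "lights[7]" with \c len == 9 returns 7 and leaves \c *out_base_name_end
 * pointing at the "[", so the base name is "lights"; parsing "lights"
 * returns -1.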
 */
long
parse_program_resource_name(const GLchar *name,
                            const size_t len,
                            const GLchar **out_base_name_end)
{
   /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
    *
    *     "When an integer array element or block instance number is part of
    *     the name string, it will be specified in decimal form without a "+"
    *     or "-" sign or any extra leading zeroes. Additionally, the name
    *     string will not include white space anywhere in the string."
    */

   *out_base_name_end = name + len;

   if (len == 0 || name[len-1] != ']')
      return -1;

   /* Walk backwards over the string looking for a non-digit character.  This
    * had better be the opening bracket for an array index.
    *
    * Initially, i specifies the location of the ']'.  Since the string may
    * contain only the ']' character, walk backwards very carefully.
    */
   unsigned i;
   for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
      /* empty */ ;

   if ((i == 0) || name[i-1] != '[')
      return -1;

   long array_index = strtol(&name[i], NULL, 10);
   if (array_index < 0)
      return -1;

   /* Check for leading zero */
   if (name[i] == '0' && name[i+1] != ']')
      return -1;

   *out_base_name_end = name + (i - 1);
   return array_index;
}


void
link_invalidate_variable_locations(exec_list *ir)
{
   foreach_in_list(ir_instruction, node, ir) {
      ir_variable *const var = node->as_variable();

      if (var == NULL)
         continue;

      /* Only assign locations for variables that lack an explicit location.
       * Explicit locations are set for all built-in variables, generic vertex
       * shader inputs (via layout(location=...)), and generic fragment shader
       * outputs (also via layout(location=...)).
       */
      if (!var->data.explicit_location) {
         var->data.location = -1;
         var->data.location_frac = 0;
      }

      /* ir_variable::is_unmatched_generic_inout is used by the linker while
       * connecting outputs from one stage to inputs of the next stage.
       */
      if (var->data.explicit_location &&
          var->data.location < VARYING_SLOT_VAR0) {
         var->data.is_unmatched_generic_inout = 0;
      } else {
         var->data.is_unmatched_generic_inout = 1;
      }
   }
}


/**
 * Set clip_distance_array_size and cull_distance_array_size based on the
 * given shader.
 *
 * Also check for errors based on incorrect usage of gl_ClipVertex,
 * gl_ClipDistance, and gl_CullDistance.
 * Additionally test whether the arrays gl_ClipDistance and gl_CullDistance
 * exceed the maximum size defined by gl_MaxCombinedClipAndCullDistances.
 *
 * Errors are reported via linker_error().
 */
static void
analyze_clip_cull_usage(struct gl_shader_program *prog,
                        struct gl_linked_shader *shader,
                        struct gl_context *ctx,
                        struct shader_info *info)
{
   info->clip_distance_array_size = 0;
   info->cull_distance_array_size = 0;

   if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
      /* From section 7.1 (Vertex Shader Special Variables) of the
       * GLSL 1.30 spec:
       *
       *   "It is an error for a shader to statically write both
       *   gl_ClipVertex and gl_ClipDistance."
       *
       * This does not apply to GLSL ES shaders, since GLSL ES defines neither
       * gl_ClipVertex nor gl_ClipDistance. However with
       * GL_EXT_clip_cull_distance, this functionality is exposed in ES 3.0.
       */
      find_variable gl_ClipDistance("gl_ClipDistance");
      find_variable gl_CullDistance("gl_CullDistance");
      find_variable gl_ClipVertex("gl_ClipVertex");
      find_variable * const variables[] = {
         &gl_ClipDistance,
         &gl_CullDistance,
         !prog->IsES ? &gl_ClipVertex : NULL,
         NULL
      };
      find_assignments(shader->ir, variables);

      /* From the ARB_cull_distance spec:
       *
       * It is a compile-time or link-time error for the set of shaders forming
       * a program to statically read or write both gl_ClipVertex and either
       * gl_ClipDistance or gl_CullDistance.
       *
       * This does not apply to GLSL ES shaders, since GLSL ES doesn't define
       * gl_ClipVertex.
       */
      if (!prog->IsES) {
         if (gl_ClipVertex.found && gl_ClipDistance.found) {
            linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
                         "and `gl_ClipDistance'\n",
                         _mesa_shader_stage_to_string(shader->Stage));
            return;
         }
         if (gl_ClipVertex.found && gl_CullDistance.found) {
            linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
                         "and `gl_CullDistance'\n",
                         _mesa_shader_stage_to_string(shader->Stage));
            return;
         }
      }

      if (gl_ClipDistance.found) {
         ir_variable *clip_distance_var =
                shader->symbols->get_variable("gl_ClipDistance");
         assert(clip_distance_var);
         info->clip_distance_array_size = clip_distance_var->type->length;
      }
      if (gl_CullDistance.found) {
         ir_variable *cull_distance_var =
                shader->symbols->get_variable("gl_CullDistance");
         assert(cull_distance_var);
         info->cull_distance_array_size = cull_distance_var->type->length;
      }
      /* From the ARB_cull_distance spec:
       *
       * It is a compile-time or link-time error for the set of shaders forming
       * a program to have the sum of the sizes of the gl_ClipDistance and
       * gl_CullDistance arrays to be larger than
       * gl_MaxCombinedClipAndCullDistances.
       */
      if ((uint32_t)(info->clip_distance_array_size + info->cull_distance_array_size) >
          ctx->Const.MaxClipPlanes) {
          linker_error(prog, "%s shader: the combined size of "
                       "'gl_ClipDistance' and 'gl_CullDistance' cannot "
                       "be larger than "
                       "gl_MaxCombinedClipAndCullDistances (%u)",
                       _mesa_shader_stage_to_string(shader->Stage),
                       ctx->Const.MaxClipPlanes);
      }
   }
}


/**
 * Verify that a vertex shader executable meets all semantic requirements.
 *
 * Also sets info.clip_distance_array_size and
 * info.cull_distance_array_size as a side effect.
 *
 * \param shader  Vertex shader executable to be verified
 */
static void
validate_vertex_shader_executable(struct gl_shader_program *prog,
                                  struct gl_linked_shader *shader,
                                  struct gl_context *ctx)
{
   if (shader == NULL)
      return;

   /* From the GLSL 1.10 spec, page 48:
    *
    *     "The variable gl_Position is available only in the vertex
    *      language and is intended for writing the homogeneous vertex
    *      position. All executions of a well-formed vertex shader
    *      executable must write a value into this variable. [...] The
    *      variable gl_Position is available only in the vertex
    *      language and is intended for writing the homogeneous vertex
    *      position. All executions of a well-formed vertex shader
    *      executable must write a value into this variable."
    *
    * while in GLSL 1.40 this text is changed to:
    *
    *     "The variable gl_Position is available only in the vertex
    *      language and is intended for writing the homogeneous vertex
    *      position. It can be written at any time during shader
    *      execution. It may also be read back by a vertex shader
    *      after being written. This value will be used by primitive
    *      assembly, clipping, culling, and other fixed functionality
    *      operations, if present, that operate on primitives after
    *      vertex processing has occurred. Its value is undefined if
    *      the vertex shader executable does not write gl_Position."
    *
    * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
    * gl_Position is not an error.
    */
   if (prog->data->Version < (prog->IsES ? 300 : 140)) {
      find_variable gl_Position("gl_Position");
      find_assignments(shader->ir, &gl_Position);
      if (!gl_Position.found) {
        if (prog->IsES) {
          linker_warning(prog,
                         "vertex shader does not write to `gl_Position'. "
                         "Its value is undefined. \n");
        } else {
          linker_error(prog,
                       "vertex shader does not write to `gl_Position'. \n");
        }
         return;
      }
   }

   analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
}

static void
validate_tess_eval_shader_executable(struct gl_shader_program *prog,
                                     struct gl_linked_shader *shader,
                                     struct gl_context *ctx)
{
   if (shader == NULL)
      return;

   analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
}


/**
 * Verify that a fragment shader executable meets all semantic requirements
 *
 * \param shader  Fragment shader executable to be verified
 */
static void
validate_fragment_shader_executable(struct gl_shader_program *prog,
                                    struct gl_linked_shader *shader)
{
   if (shader == NULL)
      return;

   find_variable gl_FragColor("gl_FragColor");
   find_variable gl_FragData("gl_FragData");
   find_variable * const variables[] = { &gl_FragColor, &gl_FragData, NULL };
   find_assignments(shader->ir, variables);

   if (gl_FragColor.found && gl_FragData.found) {
      linker_error(prog,  "fragment shader writes to both "
                   "`gl_FragColor' and `gl_FragData'\n");
   }
}

/**
 * Verify that a geometry shader executable meets all semantic requirements
 *
 * Also sets prog->Geom.VerticesIn, info.clip_distance_array_size and
 * info.cull_distance_array_size as a side effect.
 *
 * \param shader Geometry shader executable to be verified
 */
static void
validate_geometry_shader_executable(struct gl_shader_program *prog,
                                    struct gl_linked_shader *shader,
                                    struct gl_context *ctx)
{
   if (shader == NULL)
      return;

   unsigned num_vertices =
      vertices_per_prim(shader->Program->info.gs.input_primitive);
   prog->Geom.VerticesIn = num_vertices;

   analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
}

/**
 * Check if geometry shaders emit to non-zero streams and do corresponding
 * validations.
 */
static void
validate_geometry_shader_emissions(struct gl_context *ctx,
                                   struct gl_shader_program *prog)
{
   struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];

   if (sh != NULL) {
      find_emit_vertex_visitor emit_vertex(ctx->Const.MaxVertexStreams - 1);
      emit_vertex.run(sh->ir);
      if (emit_vertex.error()) {
         linker_error(prog, "Invalid call %s(%d). Accepted values for the "
                      "stream parameter are in the range [0, %d].\n",
                      emit_vertex.error_func(),
                      emit_vertex.error_stream(),
                      ctx->Const.MaxVertexStreams - 1);
      }
      prog->Geom.ActiveStreamMask = emit_vertex.active_stream_mask();
      prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();

      /* From the ARB_gpu_shader5 spec:
       *
       *   "Multiple vertex streams are supported only if the output primitive
       *    type is declared to be "points".  A program will fail to link if it
       *    contains a geometry shader calling EmitStreamVertex() or
       *    EndStreamPrimitive() if its output primitive type is not "points".
       *
       * However, in the same spec:
       *
       *   "The function EmitVertex() is equivalent to calling EmitStreamVertex()
       *    with <stream> set to zero."
       *
       * And:
       *
       *   "The function EndPrimitive() is equivalent to calling
       *    EndStreamPrimitive() with <stream> set to zero."
       *
       * Since we can call EmitVertex() and EndPrimitive() when we output
       * primitives other than points, calling EmitStreamVertex(0) or
       * EndStreamPrimitive(0) should not produce errors. This is also what
       * Nvidia does. We can use prog->Geom.ActiveStreamMask to check whether
       * only the first (zero) stream is active.
       */
      if (prog->Geom.ActiveStreamMask & ~(1 << 0) &&
          sh->Program->info.gs.output_primitive != GL_POINTS) {
         linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
                      "with n>0 requires point output\n");
      }
   }
}

bool
validate_intrastage_arrays(struct gl_shader_program *prog,
                           ir_variable *const var,
                           ir_variable *const existing,
                           bool match_precision)
{
   /* Consider the types to be "the same" if both types are arrays
    * of the same type and one of the arrays is implicitly sized.
    * In addition, set the type of the linked variable to the
    * explicitly sized array.
    */
   if (var->type->is_array() && existing->type->is_array()) {
      const glsl_type *no_array_var = var->type->fields.array;
      const glsl_type *no_array_existing = existing->type->fields.array;
      bool type_matches;

      type_matches = (match_precision ?
                      no_array_var == no_array_existing :
                      no_array_var->compare_no_precision(no_array_existing));

      if (type_matches &&
          ((var->type->length == 0)|| (existing->type->length == 0))) {
         if (var->type->length != 0) {
            if ((int)var->type->length <= existing->data.max_array_access) {
               linker_error(prog, "%s `%s' declared as type "
                           "`%s' but outermost dimension has an index"
                           " of `%i'\n",
                           mode_string(var),
                           var->name, var->type->name,
                           existing->data.max_array_access);
            }
            existing->type = var->type;
            return true;
         } else if (existing->type->length != 0) {
            if((int)existing->type->length <= var->data.max_array_access &&
               !existing->data.from_ssbo_unsized_array) {
               linker_error(prog, "%s `%s' declared as type "
                           "`%s' but outermost dimension has an index"
                           " of `%i'\n",
                           mode_string(var),
                           var->name, existing->type->name,
                           var->data.max_array_access);
            }
            return true;
         }
      }
   }
   return false;
}
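
/* Illustrative example of the check above (GLSL, not code in this file):
 * if one compilation unit declares
 *
 *    uniform float weights[];      // implicitly sized, indexed up to [3]
 *
 * and another declares
 *
 *    uniform float weights[8];     // explicitly sized
 *
 * the two are treated as the same uniform and the linked variable takes the
 * explicit size 8.  If the implicitly sized declaration had been indexed
 * with weights[8] or higher, a link error would be reported instead.
 */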


/**
 * Perform validation of global variables used across multiple shaders
 */
static void
cross_validate_globals(struct gl_context *ctx, struct gl_shader_program *prog,
                       struct exec_list *ir, glsl_symbol_table *variables,
                       bool uniforms_only)
{
   foreach_in_list(ir_instruction, node, ir) {
      ir_variable *const var = node->as_variable();

      if (var == NULL)
         continue;

      if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
         continue;

      /* don't cross validate subroutine uniforms */
      if (var->type->contains_subroutine())
         continue;

      /* Don't cross validate interface instances. These are only relevant
       * inside a shader. The cross validation is done at the Interface Block
       * name level.
       */
      if (var->is_interface_instance())
         continue;

      /* Don't cross validate temporaries that are at global scope.  These
       * will eventually get pulled into the shaders 'main'.
       */
      if (var->data.mode == ir_var_temporary)
         continue;

      /* If a global with this name has already been seen, verify that the
       * new instance has the same type.  In addition, if the globals have
       * initializers, the values of the initializers must be the same.
       */
      ir_variable *const existing = variables->get_variable(var->name);
      if (existing != NULL) {
         /* Check if types match. */
         if (var->type != existing->type) {
            if (!validate_intrastage_arrays(prog, var, existing)) {
               /* If it is an unsized array in a Shader Storage Block,
                * two different shaders can access different elements.
                * Because of that, they might be converted to different
                * sized arrays, then check that they are compatible but
                * ignore the array size.
                */
               if (!(var->data.mode == ir_var_shader_storage &&
                     var->data.from_ssbo_unsized_array &&
                     existing->data.mode == ir_var_shader_storage &&
                     existing->data.from_ssbo_unsized_array &&
                     var->type->gl_type == existing->type->gl_type)) {
                  linker_error(prog, "%s `%s' declared as type "
                                 "`%s' and type `%s'\n",
                                 mode_string(var),
                                 var->name, var->type->name,
                                 existing->type->name);
                  return;
               }
            }
         }

         if (var->data.explicit_location) {
            if (existing->data.explicit_location
                && (var->data.location != existing->data.location)) {
               linker_error(prog, "explicit locations for %s "
                            "`%s' have differing values\n",
                            mode_string(var), var->name);
               return;
            }

            if (var->data.location_frac != existing->data.location_frac) {
               linker_error(prog, "explicit components for %s `%s' have "
                            "differing values\n", mode_string(var), var->name);
               return;
            }

            existing->data.location = var->data.location;
            existing->data.explicit_location = true;
         } else {
            /* Check if a uniform with an implicit location was marked
             * explicit by an earlier shader stage. If so, mark it explicit
             * in this stage too to make sure later processing does not
             * treat it as an implicit one.
             */
            if (existing->data.explicit_location) {
               var->data.location = existing->data.location;
               var->data.explicit_location = true;
            }
         }

         /* From the GLSL 4.20 specification:
          * "A link error will result if two compilation units in a program
          *  specify different integer-constant bindings for the same
          *  opaque-uniform name.  However, it is not an error to specify a
          *  binding on some but not all declarations for the same name"
          */
         if (var->data.explicit_binding) {
            if (existing->data.explicit_binding &&
                var->data.binding != existing->data.binding) {
               linker_error(prog, "explicit bindings for %s "
                            "`%s' have differing values\n",
                            mode_string(var), var->name);
               return;
            }

            existing->data.binding = var->data.binding;
            existing->data.explicit_binding = true;
         }

         if (var->type->contains_atomic() &&
             var->data.offset != existing->data.offset) {
            linker_error(prog, "offset specifications for %s "
                         "`%s' have differing values\n",
                         mode_string(var), var->name);
            return;
         }

         /* Validate layout qualifiers for gl_FragDepth.
          *
          * From the AMD/ARB_conservative_depth specs:
          *
          *    "If gl_FragDepth is redeclared in any fragment shader in a
          *    program, it must be redeclared in all fragment shaders in
          *    that program that have static assignments to
          *    gl_FragDepth. All redeclarations of gl_FragDepth in all
          *    fragment shaders in a single program must have the same set
          *    of qualifiers."
          */
         if (strcmp(var->name, "gl_FragDepth") == 0) {
            bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
            bool layout_differs =
               var->data.depth_layout != existing->data.depth_layout;

            if (layout_declared && layout_differs) {
               linker_error(prog,
                            "All redeclarations of gl_FragDepth in all "
                            "fragment shaders in a single program must have "
                            "the same set of qualifiers.\n");
            }

            if (var->data.used && layout_differs) {
               linker_error(prog,
                            "If gl_FragDepth is redeclared with a layout "
                            "qualifier in any fragment shader, it must be "
                            "redeclared with the same layout qualifier in "
                            "all fragment shaders that have assignments to "
                            "gl_FragDepth\n");
            }
         }

         /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
          *
          *     "If a shared global has multiple initializers, the
          *     initializers must all be constant expressions, and they
          *     must all have the same value. Otherwise, a link error will
          *     result. (A shared global having only one initializer does
          *     not require that initializer to be a constant expression.)"
          *
          * Previous to 4.20 the GLSL spec simply said that initializers
          * must have the same value.  In this case of non-constant
          * initializers, this was impossible to determine.  As a result,
          * no vendor actually implemented that behavior.  The 4.20
          * behavior matches the implemented behavior of at least one other
          * vendor, so we'll implement that for all GLSL versions.
          * If (at least) one of these constant expressions is implicit,
          * because it was added by glsl_zero_init, we skip the verification.
          */
         if (var->constant_initializer != NULL) {
            if (existing->constant_initializer != NULL &&
                !existing->data.is_implicit_initializer &&
                !var->data.is_implicit_initializer) {
               if (!var->constant_initializer->has_value(existing->constant_initializer)) {
                  linker_error(prog, "initializers for %s "
                               "`%s' have differing values\n",
                               mode_string(var), var->name);
                  return;
               }
            } else {
               /* If the first-seen instance of a particular uniform did
                * not have an initializer but a later instance does,
                * replace the former with the latter.
                */
               if (!var->data.is_implicit_initializer)
                  variables->replace_variable(existing->name, var);
            }
         }

         if (var->data.has_initializer) {
            if (existing->data.has_initializer
                && (var->constant_initializer == NULL
                    || existing->constant_initializer == NULL)) {
               linker_error(prog,
                            "shared global variable `%s' has multiple "
                            "non-constant initializers.\n",
                            var->name);
               return;
            }
         }

         if (existing->data.explicit_invariant != var->data.explicit_invariant) {
            linker_error(prog, "declarations for %s `%s' have "
                         "mismatching invariant qualifiers\n",
                         mode_string(var), var->name);
            return;
         }
         if (existing->data.centroid != var->data.centroid) {
            linker_error(prog, "declarations for %s `%s' have "
                         "mismatching centroid qualifiers\n",
                         mode_string(var), var->name);
            return;
         }
         if (existing->data.sample != var->data.sample) {
            linker_error(prog, "declarations for %s `%s` have "
                         "mismatching sample qualifiers\n",
                         mode_string(var), var->name);
            return;
         }
         if (existing->data.image_format != var->data.image_format) {
            linker_error(prog, "declarations for %s `%s` have "
                         "mismatching image format qualifiers\n",
                         mode_string(var), var->name);
            return;
         }

         /* Check the precision qualifier matches for uniform variables on
          * GLSL ES.
          */
         if (!ctx->Const.AllowGLSLRelaxedES &&
             prog->IsES && !var->get_interface_type() &&
             existing->data.precision != var->data.precision) {
            if ((existing->data.used && var->data.used) || prog->data->Version >= 300) {
               linker_error(prog, "declarations for %s `%s` have "
                            "mismatching precision qualifiers\n",
                            mode_string(var), var->name);
               return;
            } else {
               linker_warning(prog, "declarations for %s `%s` have "
                              "mismatching precision qualifiers\n",
                              mode_string(var), var->name);
            }
         }

         /* In OpenGL GLSL 3.20 spec, section 4.3.9:
          *
          *   "It is a link-time error if any particular shader interface
          *    contains:
          *
          *    - two different blocks, each having no instance name, and each
          *      having a member of the same name, or
          *
          *    - a variable outside a block, and a block with no instance name,
          *      where the variable has the same name as a member in the block."
          */
         const glsl_type *var_itype = var->get_interface_type();
         const glsl_type *existing_itype = existing->get_interface_type();
         if (var_itype != existing_itype) {
            if (!var_itype || !existing_itype) {
               linker_error(prog, "declarations for %s `%s` are inside block "
                            "`%s` and outside a block",
                            mode_string(var), var->name,
                            var_itype ? var_itype->name : existing_itype->name);
               return;
            } else if (strcmp(var_itype->name, existing_itype->name) != 0) {
               linker_error(prog, "declarations for %s `%s` are inside blocks "
                            "`%s` and `%s`",
                            mode_string(var), var->name,
                            existing_itype->name,
                            var_itype->name);
               return;
            }
         }
      } else
         variables->add_variable(var);
   }
}


/**
 * Perform validation of uniforms used across multiple shader stages
 */
static void
cross_validate_uniforms(struct gl_context *ctx,
                        struct gl_shader_program *prog)
{
   glsl_symbol_table variables;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      cross_validate_globals(ctx, prog, prog->_LinkedShaders[i]->ir,
                             &variables, true);
   }
}

/**
 * Accumulates the array of buffer blocks and checks that all definitions of
 * blocks agree on their contents.
 */
static bool
interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
                                         bool validate_ssbo)
{
   int *ifc_blk_stage_idx[MESA_SHADER_STAGES];
   struct gl_uniform_block *blks = NULL;
   unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
      &prog->data->NumUniformBlocks;

   unsigned max_num_buffer_blocks = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i]) {
         if (validate_ssbo) {
            max_num_buffer_blocks +=
               prog->_LinkedShaders[i]->Program->info.num_ssbos;
         } else {
            max_num_buffer_blocks +=
               prog->_LinkedShaders[i]->Program->info.num_ubos;
         }
      }
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[i];

      ifc_blk_stage_idx[i] =
         (int *) malloc(sizeof(int) * max_num_buffer_blocks);
      for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
         ifc_blk_stage_idx[i][j] = -1;

      if (sh == NULL)
         continue;

      unsigned sh_num_blocks;
      struct gl_uniform_block **sh_blks;
      if (validate_ssbo) {
         sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
         sh_blks = sh->Program->sh.ShaderStorageBlocks;
      } else {
         sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
         sh_blks = sh->Program->sh.UniformBlocks;
      }

      for (unsigned int j = 0; j < sh_num_blocks; j++) {
         int index = link_cross_validate_uniform_block(prog->data, &blks,
                                                       num_blks, sh_blks[j]);

         if (index == -1) {
            linker_error(prog, "buffer block `%s' has mismatching "
                         "definitions\n", sh_blks[j]->Name);

            for (unsigned k = 0; k <= i; k++) {
               free(ifc_blk_stage_idx[k]);
            }

            /* Reset the block count. This will help avoid various segfaults
             * from api calls that assume the array exists due to the count
             * being non-zero.
             */
            *num_blks = 0;
            return false;
         }

         ifc_blk_stage_idx[i][index] = j;
      }
   }

   /* Update per stage block pointers to point to the program list.
    * FIXME: We should be able to free the per stage blocks here.
    */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      for (unsigned j = 0; j < *num_blks; j++) {
         int stage_index = ifc_blk_stage_idx[i][j];

         if (stage_index != -1) {
            struct gl_linked_shader *sh = prog->_LinkedShaders[i];

            struct gl_uniform_block **sh_blks = validate_ssbo ?
               sh->Program->sh.ShaderStorageBlocks :
               sh->Program->sh.UniformBlocks;

            blks[j].stageref |= sh_blks[stage_index]->stageref;
            sh_blks[stage_index] = &blks[j];
         }
      }
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      free(ifc_blk_stage_idx[i]);
   }

   if (validate_ssbo)
      prog->data->ShaderStorageBlocks = blks;
   else
      prog->data->UniformBlocks = blks;

   return true;
}

/**
 * Verifies the invariance of built-in special variables.
 */
static bool
validate_invariant_builtins(struct gl_shader_program *prog,
                            const gl_linked_shader *vert,
                            const gl_linked_shader *frag)
{
   const ir_variable *var_vert;
   const ir_variable *var_frag;

   if (!vert || !frag)
      return true;

   /*
    * From OpenGL ES Shading Language 1.0 specification
    * (4.6.4 Invariance and Linkage):
    *     "The invariance of varyings that are declared in both the vertex and
    *     fragment shaders must match. For the built-in special variables,
    *     gl_FragCoord can only be declared invariant if and only if
    *     gl_Position is declared invariant. Similarly gl_PointCoord can only
    *     be declared invariant if and only if gl_PointSize is declared
    *     invariant. It is an error to declare gl_FrontFacing as invariant.
    *     The invariance of gl_FrontFacing is the same as the invariance of
    *     gl_Position."
    */
   var_frag = frag->symbols->get_variable("gl_FragCoord");
   if (var_frag && var_frag->data.invariant) {
      var_vert = vert->symbols->get_variable("gl_Position");
      if (var_vert && !var_vert->data.invariant) {
         linker_error(prog,
               "fragment shader built-in `%s' has invariant qualifier, "
               "but vertex shader built-in `%s' lacks invariant qualifier\n",
               var_frag->name, var_vert->name);
         return false;
      }
   }

   var_frag = frag->symbols->get_variable("gl_PointCoord");
   if (var_frag && var_frag->data.invariant) {
      var_vert = vert->symbols->get_variable("gl_PointSize");
      if (var_vert && !var_vert->data.invariant) {
         linker_error(prog,
               "fragment shader built-in `%s' has invariant qualifier, "
               "but vertex shader built-in `%s' lacks invariant qualifier\n",
               var_frag->name, var_vert->name);
         return false;
      }
   }

   var_frag = frag->symbols->get_variable("gl_FrontFacing");
   if (var_frag && var_frag->data.invariant) {
      linker_error(prog,
            "fragment shader built-in `%s' can not be declared as invariant\n",
            var_frag->name);
      return false;
   }

   return true;
}
1391 
1392 /**
1393  * Populates a shader's symbol table with all global declarations
1394  */
1395 static void
1396 populate_symbol_table(gl_linked_shader *sh, glsl_symbol_table *symbols)
1397 {
1398    sh->symbols = new(sh) glsl_symbol_table;
1399 
1400    _mesa_glsl_copy_symbols_from_table(sh->ir, symbols, sh->symbols);
1401 }
1402 
1403 
1404 /**
1405  * Remap variables referenced in an instruction tree
1406  *
1407  * This is used when instruction trees are cloned from one shader and placed in
1408  * another.  These trees will contain references to \c ir_variable nodes that
1409  * do not exist in the target shader.  This function finds these \c ir_variable
1410  * references and replaces the references with matching variables in the target
1411  * shader.
1412  *
1413  * If there is no matching variable in the target shader, a clone of the
1414  * \c ir_variable is made and added to the target shader.  The new variable is
1415  * added to \b both the instruction stream and the symbol table.
1416  *
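 * As an illustrative sketch (mirroring the call site in
 * move_non_declarations() below), a cloned non-declaration instruction is
 * fixed up roughly like this:
 *
 * \code
 *    ir_instruction *copy = inst->clone(target, NULL);
 *    remap_variables(copy, target, temps);
 * \endcode
 *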
1417  * \param inst    IR tree that is to be processed.
1418  * \param target  Linked shader whose symbol table and instruction stream
1419  *                receive any newly cloned variables.
1420  * \param temps   Hash table mapping temporary variables in the source
1421  *                shader to their clones already emitted in the target.
1422  */
1423 static void
1424 remap_variables(ir_instruction *inst, struct gl_linked_shader *target,
1425                 hash_table *temps)
1426 {
1427    class remap_visitor : public ir_hierarchical_visitor {
1428    public:
1429          remap_visitor(struct gl_linked_shader *target, hash_table *temps)
1430       {
1431          this->target = target;
1432          this->symbols = target->symbols;
1433          this->instructions = target->ir;
1434          this->temps = temps;
1435       }
1436 
1437       virtual ir_visitor_status visit(ir_dereference_variable *ir)
1438       {
1439          if (ir->var->data.mode == ir_var_temporary) {
1440             hash_entry *entry = _mesa_hash_table_search(temps, ir->var);
1441             ir_variable *var = entry ? (ir_variable *) entry->data : NULL;
1442 
1443             assert(var != NULL);
1444             ir->var = var;
1445             return visit_continue;
1446          }
1447 
1448          ir_variable *const existing =
1449             this->symbols->get_variable(ir->var->name);
1450          if (existing != NULL)
1451             ir->var = existing;
1452          else {
1453             ir_variable *copy = ir->var->clone(this->target, NULL);
1454 
1455             this->symbols->add_variable(copy);
1456             this->instructions->push_head(copy);
1457             ir->var = copy;
1458          }
1459 
1460          return visit_continue;
1461       }
1462 
1463    private:
1464       struct gl_linked_shader *target;
1465       glsl_symbol_table *symbols;
1466       exec_list *instructions;
1467       hash_table *temps;
1468    };
1469 
1470    remap_visitor v(target, temps);
1471 
1472    inst->accept(&v);
1473 }
1474 
1475 
1476 /**
1477  * Move non-declarations from one instruction stream to another
1478  *
1479  * The intended usage pattern of this function is to pass the pointer to the
1480  * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
1481  * pointer) for \c last and \c false for \c make_copies on the first
1482  * call.  Successive calls pass the return value of the previous call for
1483  * \c last and \c true for \c make_copies.
1484  *
1485  * \param instructions Source instruction stream
1486  * \param last         Instruction after which new instructions should be
1487  *                     inserted in the target instruction stream
1488  * \param make_copies  Flag selecting whether instructions in \c instructions
1489  *                     should be copied (via \c ir_instruction::clone) into the
1490  *                     target list or moved.
1491  *
1492  * \return
1493  * The new "last" instruction in the target instruction stream.  This pointer
1494  * is suitable for use as the \c last parameter of a later call to this
1495  * function.
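 *
 * A minimal usage sketch (variable names here are illustrative; see
 * link_intrastage_shaders() below for the real call sites):
 *
 * \code
 *    exec_node *last = (exec_node *) &main_sig->body;
 *    last = move_non_declarations(linked->ir, last, false, linked);
 *    last = move_non_declarations(other_shader->ir, last, true, linked);
 * \endcode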
1496  */
1497 static exec_node *
1498 move_non_declarations(exec_list *instructions, exec_node *last,
1499                       bool make_copies, gl_linked_shader *target)
1500 {
1501    hash_table *temps = NULL;
1502 
1503    if (make_copies)
1504       temps = _mesa_pointer_hash_table_create(NULL);
1505 
1506    foreach_in_list_safe(ir_instruction, inst, instructions) {
1507       if (inst->as_function())
1508          continue;
1509 
1510       ir_variable *var = inst->as_variable();
1511       if ((var != NULL) && (var->data.mode != ir_var_temporary))
1512          continue;
1513 
1514       assert(inst->as_assignment()
1515              || inst->as_call()
1516              || inst->as_if() /* for initializers with the ?: operator */
1517              || ((var != NULL) && (var->data.mode == ir_var_temporary)));
1518 
1519       if (make_copies) {
1520          inst = inst->clone(target, NULL);
1521 
1522          if (var != NULL)
1523             _mesa_hash_table_insert(temps, var, inst);
1524          else
1525             remap_variables(inst, target, temps);
1526       } else {
1527          inst->remove();
1528       }
1529 
1530       last->insert_after(inst);
1531       last = inst;
1532    }
1533 
1534    if (make_copies)
1535       _mesa_hash_table_destroy(temps, NULL);
1536 
1537    return last;
1538 }
1539 
1540 
1541 /**
1542  * This class is only used in link_intrastage_shaders() below but declaring
1543  * it inside that function leads to compiler warnings with some versions of
1544  * gcc.
1545  */
1546 class array_sizing_visitor : public deref_type_updater {
1547 public:
1548    using deref_type_updater::visit;
1549 
1550    array_sizing_visitor()
1551       : mem_ctx(ralloc_context(NULL)),
1552         unnamed_interfaces(_mesa_pointer_hash_table_create(NULL))
1553    {
1554    }
1555 
1556    ~array_sizing_visitor()
1557    {
1558       _mesa_hash_table_destroy(this->unnamed_interfaces, NULL);
1559       ralloc_free(this->mem_ctx);
1560    }
1561 
1562    virtual ir_visitor_status visit(ir_variable *var)
1563    {
1564       const glsl_type *type_without_array;
1565       bool implicit_sized_array = var->data.implicit_sized_array;
1566       fixup_type(&var->type, var->data.max_array_access,
1567                  var->data.from_ssbo_unsized_array,
1568                  &implicit_sized_array);
1569       var->data.implicit_sized_array = implicit_sized_array;
1570       type_without_array = var->type->without_array();
1571       if (var->type->is_interface()) {
1572          if (interface_contains_unsized_arrays(var->type)) {
1573             const glsl_type *new_type =
1574                resize_interface_members(var->type,
1575                                         var->get_max_ifc_array_access(),
1576                                         var->is_in_shader_storage_block());
1577             var->type = new_type;
1578             var->change_interface_type(new_type);
1579          }
1580       } else if (type_without_array->is_interface()) {
1581          if (interface_contains_unsized_arrays(type_without_array)) {
1582             const glsl_type *new_type =
1583                resize_interface_members(type_without_array,
1584                                         var->get_max_ifc_array_access(),
1585                                         var->is_in_shader_storage_block());
1586             var->change_interface_type(new_type);
1587             var->type = update_interface_members_array(var->type, new_type);
1588          }
1589       } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1590          /* Store a pointer to the variable in the unnamed_interfaces
1591           * hashtable.
1592           */
1593          hash_entry *entry =
1594                _mesa_hash_table_search(this->unnamed_interfaces,
1595                                        ifc_type);
1596 
1597          ir_variable **interface_vars = entry ? (ir_variable **) entry->data : NULL;
1598 
1599          if (interface_vars == NULL) {
1600             interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1601                                            ifc_type->length);
1602             _mesa_hash_table_insert(this->unnamed_interfaces, ifc_type,
1603                                     interface_vars);
1604          }
1605          unsigned index = ifc_type->field_index(var->name);
1606          assert(index < ifc_type->length);
1607          assert(interface_vars[index] == NULL);
1608          interface_vars[index] = var;
1609       }
1610       return visit_continue;
1611    }
1612 
1613    /**
1614     * For each unnamed interface block that was discovered while running the
1615     * visitor, adjust the interface type to reflect the newly assigned array
1616     * sizes, and fix up the ir_variable nodes to point to the new interface
1617     * type.
1618     */
1619    void fixup_unnamed_interface_types()
1620    {
1621       hash_table_call_foreach(this->unnamed_interfaces,
1622                               fixup_unnamed_interface_type, NULL);
1623    }
1624 
1625 private:
1626    /**
1627     * If the type pointed to by \c type represents an unsized array, replace
1628     * it with a sized array whose size is determined by max_array_access.
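    *
    * For example, an unsized array \c float \c a[] whose highest statically
    * accessed element is \c a[2] (so max_array_access is 2) is given the
    * sized type \c float[3].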
1629     */
1630    static void fixup_type(const glsl_type **type, unsigned max_array_access,
1631                           bool from_ssbo_unsized_array, bool *implicit_sized)
1632    {
1633       if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
1634          *type = glsl_type::get_array_instance((*type)->fields.array,
1635                                                max_array_access + 1);
1636          *implicit_sized = true;
1637          assert(*type != NULL);
1638       }
1639    }
1640 
1641    static const glsl_type *
1642    update_interface_members_array(const glsl_type *type,
1643                                   const glsl_type *new_interface_type)
1644    {
1645       const glsl_type *element_type = type->fields.array;
1646       if (element_type->is_array()) {
1647          const glsl_type *new_array_type =
1648             update_interface_members_array(element_type, new_interface_type);
1649          return glsl_type::get_array_instance(new_array_type, type->length);
1650       } else {
1651          return glsl_type::get_array_instance(new_interface_type,
1652                                               type->length);
1653       }
1654    }
1655 
1656    /**
1657     * Determine whether the given interface type contains unsized arrays (if
1658     * it doesn't, array_sizing_visitor doesn't need to process it).
1659     */
1660    static bool interface_contains_unsized_arrays(const glsl_type *type)
1661    {
1662       for (unsigned i = 0; i < type->length; i++) {
1663          const glsl_type *elem_type = type->fields.structure[i].type;
1664          if (elem_type->is_unsized_array())
1665             return true;
1666       }
1667       return false;
1668    }
1669 
1670    /**
1671     * Create a new interface type based on the given type, with unsized arrays
1672     * replaced by sized arrays whose size is determined by
1673     * max_ifc_array_access.
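    *
    * Note that the last member of a shader storage block may legitimately
    * remain a run-time sized (unsized) array, so it is deliberately not
    * resized; see the \c is_ssbo handling below.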
1674     */
1675    static const glsl_type *
1676    resize_interface_members(const glsl_type *type,
1677                             const int *max_ifc_array_access,
1678                             bool is_ssbo)
1679    {
1680       unsigned num_fields = type->length;
1681       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1682       memcpy(fields, type->fields.structure,
1683              num_fields * sizeof(*fields));
1684       for (unsigned i = 0; i < num_fields; i++) {
1685          bool implicit_sized_array = fields[i].implicit_sized_array;
1686          /* If the last member of an SSBO is an unsized array, don't replace
1687           * it with a sized array.
1688           */
1689          if (is_ssbo && i == (num_fields - 1))
1690             fixup_type(&fields[i].type, max_ifc_array_access[i],
1691                        true, &implicit_sized_array);
1692          else
1693             fixup_type(&fields[i].type, max_ifc_array_access[i],
1694                        false, &implicit_sized_array);
1695          fields[i].implicit_sized_array = implicit_sized_array;
1696       }
1697       glsl_interface_packing packing =
1698          (glsl_interface_packing) type->interface_packing;
1699       bool row_major = (bool) type->interface_row_major;
1700       const glsl_type *new_ifc_type =
1701          glsl_type::get_interface_instance(fields, num_fields,
1702                                            packing, row_major, type->name);
1703       delete [] fields;
1704       return new_ifc_type;
1705    }
1706 
1707    static void fixup_unnamed_interface_type(const void *key, void *data,
1708                                             void *)
1709    {
1710       const glsl_type *ifc_type = (const glsl_type *) key;
1711       ir_variable **interface_vars = (ir_variable **) data;
1712       unsigned num_fields = ifc_type->length;
1713       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1714       memcpy(fields, ifc_type->fields.structure,
1715              num_fields * sizeof(*fields));
1716       bool interface_type_changed = false;
1717       for (unsigned i = 0; i < num_fields; i++) {
1718          if (interface_vars[i] != NULL &&
1719              fields[i].type != interface_vars[i]->type) {
1720             fields[i].type = interface_vars[i]->type;
1721             interface_type_changed = true;
1722          }
1723       }
1724       if (!interface_type_changed) {
1725          delete [] fields;
1726          return;
1727       }
1728       glsl_interface_packing packing =
1729          (glsl_interface_packing) ifc_type->interface_packing;
1730       bool row_major = (bool) ifc_type->interface_row_major;
1731       const glsl_type *new_ifc_type =
1732          glsl_type::get_interface_instance(fields, num_fields, packing,
1733                                            row_major, ifc_type->name);
1734       delete [] fields;
1735       for (unsigned i = 0; i < num_fields; i++) {
1736          if (interface_vars[i] != NULL)
1737             interface_vars[i]->change_interface_type(new_ifc_type);
1738       }
1739    }
1740 
1741    /**
1742     * Memory context used to allocate the data in \c unnamed_interfaces.
1743     */
1744    void *mem_ctx;
1745 
1746    /**
1747     * Hash table from const glsl_type * to an array of ir_variable *'s
1748     * pointing to the ir_variables constituting each unnamed interface block.
1749     */
1750    hash_table *unnamed_interfaces;
1751 };
1752 
1753 static bool
1754 validate_xfb_buffer_stride(struct gl_context *ctx, unsigned idx,
1755                            struct gl_shader_program *prog)
1756 {
1757    /* We will validate doubles at a later stage */
1758    if (prog->TransformFeedback.BufferStride[idx] % 4) {
1759       linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1760                    "multiple of 4, or a multiple of 8 if it is applied to "
1761                    "a type that is or contains a double.",
1762                    prog->TransformFeedback.BufferStride[idx]);
1763       return false;
1764    }
1765 
1766    if (prog->TransformFeedback.BufferStride[idx] / 4 >
1767        ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1768       linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1769                    "limit has been exceeded.");
1770       return false;
1771    }
1772 
1773    return true;
1774 }
1775 
1776 /**
1777  * Check for conflicting xfb_stride default qualifiers and store buffer stride
1778  * for later use.
1779  */
1780 static void
1781 link_xfb_stride_layout_qualifiers(struct gl_context *ctx,
1782                                   struct gl_shader_program *prog,
1783                                   struct gl_shader **shader_list,
1784                                   unsigned num_shaders)
1785 {
1786    for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
1787       prog->TransformFeedback.BufferStride[i] = 0;
1788    }
1789 
1790    for (unsigned i = 0; i < num_shaders; i++) {
1791       struct gl_shader *shader = shader_list[i];
1792 
1793       for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1794          if (shader->TransformFeedbackBufferStride[j]) {
1795             if (prog->TransformFeedback.BufferStride[j] == 0) {
1796                prog->TransformFeedback.BufferStride[j] =
1797                   shader->TransformFeedbackBufferStride[j];
1798                if (!validate_xfb_buffer_stride(ctx, j, prog))
1799                   return;
1800             } else if (prog->TransformFeedback.BufferStride[j] !=
1801                        shader->TransformFeedbackBufferStride[j]){
1802                linker_error(prog,
1803                             "intrastage shaders defined with conflicting "
1804                             "xfb_stride for buffer %d (%d and %d)\n", j,
1805                             prog->TransformFeedback.BufferStride[j],
1806                             shader->TransformFeedbackBufferStride[j]);
1807                return;
1808             }
1809          }
1810       }
1811    }
1812 }
1813 
1814 /**
1815  * Check for conflicting bindless/bound sampler/image layout qualifiers at
1816  * global scope.
1817  */
1818 static void
1819 link_bindless_layout_qualifiers(struct gl_shader_program *prog,
1820                                 struct gl_shader **shader_list,
1821                                 unsigned num_shaders)
1822 {
1823    bool bindless_sampler, bindless_image;
1824    bool bound_sampler, bound_image;
1825 
1826    bindless_sampler = bindless_image = false;
1827    bound_sampler = bound_image = false;
1828 
1829    for (unsigned i = 0; i < num_shaders; i++) {
1830       struct gl_shader *shader = shader_list[i];
1831 
1832       if (shader->bindless_sampler)
1833          bindless_sampler = true;
1834       if (shader->bindless_image)
1835          bindless_image = true;
1836       if (shader->bound_sampler)
1837          bound_sampler = true;
1838       if (shader->bound_image)
1839          bound_image = true;
1840 
1841       if ((bindless_sampler && bound_sampler) ||
1842           (bindless_image && bound_image)) {
1843          /* From section 4.4.6 of the ARB_bindless_texture spec:
1844           *
1845           *     "If both bindless_sampler and bound_sampler, or bindless_image
1846           *      and bound_image, are declared at global scope in any
1847           *      compilation unit, a link-time error will be generated."
1848           */
1849          linker_error(prog, "both bindless_sampler and bound_sampler, or "
1850                       "bindless_image and bound_image, can't be declared at "
1851                       "global scope");
1852       }
1853    }
1854 }
1855 
1856 /**
1857  * Check for conflicting viewport_relative settings across shaders and set
1858  * the value for the linked shader.
1859  */
1860 static void
1861 link_layer_viewport_relative_qualifier(struct gl_shader_program *prog,
1862                                        struct gl_program *gl_prog,
1863                                        struct gl_shader **shader_list,
1864                                        unsigned num_shaders)
1865 {
1866    unsigned i;
1867 
1868    /* Find first shader with explicit layer declaration */
1869    for (i = 0; i < num_shaders; i++) {
1870       if (shader_list[i]->redeclares_gl_layer) {
1871          gl_prog->info.layer_viewport_relative =
1872             shader_list[i]->layer_viewport_relative;
1873          break;
1874       }
1875    }
1876 
1877    /* Now make sure that each subsequent shader's explicit layer declaration
1878     * matches the first one's.
1879     */
1880    for (; i < num_shaders; i++) {
1881       if (shader_list[i]->redeclares_gl_layer &&
1882           shader_list[i]->layer_viewport_relative !=
1883           gl_prog->info.layer_viewport_relative) {
1884          linker_error(prog, "all gl_Layer redeclarations must have identical "
1885                       "viewport_relative settings");
1886       }
1887    }
1888 }
1889 
1890 /**
1891  * Performs the cross-validation of tessellation control shader vertices and
1892  * layout qualifiers for the attached tessellation control shaders,
1893  * and propagates them to the linked TCS and linked shader program.
1894  */
1895 static void
1896 link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
1897                                struct gl_program *gl_prog,
1898                                struct gl_shader **shader_list,
1899                                unsigned num_shaders)
1900 {
1901    if (gl_prog->info.stage != MESA_SHADER_TESS_CTRL)
1902       return;
1903 
1904    gl_prog->info.tess.tcs_vertices_out = 0;
1905 
1906    /* From the GLSL 4.0 spec (chapter 4.3.8.2):
1907     *
1908     *     "All tessellation control shader layout declarations in a program
1909     *      must specify the same output patch vertex count.  There must be at
1910     *      least one layout qualifier specifying an output patch vertex count
1911     *      in any program containing tessellation control shaders; however,
1912     *      such a declaration is not required in all tessellation control
1913     *      shaders."
1914     */
1915 
1916    for (unsigned i = 0; i < num_shaders; i++) {
1917       struct gl_shader *shader = shader_list[i];
1918 
1919       if (shader->info.TessCtrl.VerticesOut != 0) {
1920          if (gl_prog->info.tess.tcs_vertices_out != 0 &&
1921              gl_prog->info.tess.tcs_vertices_out !=
1922              (unsigned) shader->info.TessCtrl.VerticesOut) {
1923             linker_error(prog, "tessellation control shader defined with "
1924                          "conflicting output vertex count (%d and %d)\n",
1925                          gl_prog->info.tess.tcs_vertices_out,
1926                          shader->info.TessCtrl.VerticesOut);
1927             return;
1928          }
1929          gl_prog->info.tess.tcs_vertices_out =
1930             shader->info.TessCtrl.VerticesOut;
1931       }
1932    }
1933 
1934    /* Just do the intrastage -> interstage propagation right now,
1935     * since we already know we're in the right type of shader program
1936     * for doing it.
1937     */
1938    if (gl_prog->info.tess.tcs_vertices_out == 0) {
1939       linker_error(prog, "tessellation control shader didn't declare "
1940                    "vertices out layout qualifier\n");
1941       return;
1942    }
1943 }
1944 
1945 
1946 /**
1947  * Performs the cross-validation of tessellation evaluation shader
1948  * primitive type, vertex spacing, ordering and point_mode layout qualifiers
1949  * for the attached tessellation evaluation shaders, and propagates them
1950  * to the linked TES and linked shader program.
1951  */
1952 static void
1953 link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
1954                               struct gl_program *gl_prog,
1955                               struct gl_shader **shader_list,
1956                               unsigned num_shaders)
1957 {
1958    if (gl_prog->info.stage != MESA_SHADER_TESS_EVAL)
1959       return;
1960 
1961    int point_mode = -1;
1962    unsigned vertex_order = 0;
1963 
1964    gl_prog->info.tess.primitive_mode = PRIM_UNKNOWN;
1965    gl_prog->info.tess.spacing = TESS_SPACING_UNSPECIFIED;
1966 
1967    /* From the GLSL 4.0 spec (chapter 4.3.8.1):
1968     *
1969     *     "At least one tessellation evaluation shader (compilation unit) in
1970     *      a program must declare a primitive mode in its input layout.
1971     *      Declaration vertex spacing, ordering, and point mode identifiers is
1972     *      optional.  It is not required that all tessellation evaluation
1973     *      shaders in a program declare a primitive mode.  If spacing or
1974     *      vertex ordering declarations are omitted, the tessellation
1975     *      primitive generator will use equal spacing or counter-clockwise
1976     *      vertex ordering, respectively.  If a point mode declaration is
1977     *      omitted, the tessellation primitive generator will produce lines or
1978     *      triangles according to the primitive mode."
1979     */
1980 
1981    for (unsigned i = 0; i < num_shaders; i++) {
1982       struct gl_shader *shader = shader_list[i];
1983 
1984       if (shader->info.TessEval.PrimitiveMode != PRIM_UNKNOWN) {
1985          if (gl_prog->info.tess.primitive_mode != PRIM_UNKNOWN &&
1986              gl_prog->info.tess.primitive_mode !=
1987              shader->info.TessEval.PrimitiveMode) {
1988             linker_error(prog, "tessellation evaluation shader defined with "
1989                          "conflicting input primitive modes.\n");
1990             return;
1991          }
1992          gl_prog->info.tess.primitive_mode =
1993             shader->info.TessEval.PrimitiveMode;
1994       }
1995 
1996       if (shader->info.TessEval.Spacing != 0) {
1997          if (gl_prog->info.tess.spacing != 0 && gl_prog->info.tess.spacing !=
1998              shader->info.TessEval.Spacing) {
1999             linker_error(prog, "tessellation evaluation shader defined with "
2000                          "conflicting vertex spacing.\n");
2001             return;
2002          }
2003          gl_prog->info.tess.spacing = shader->info.TessEval.Spacing;
2004       }
2005 
2006       if (shader->info.TessEval.VertexOrder != 0) {
2007          if (vertex_order != 0 &&
2008              vertex_order != shader->info.TessEval.VertexOrder) {
2009             linker_error(prog, "tessellation evaluation shader defined with "
2010                          "conflicting ordering.\n");
2011             return;
2012          }
2013          vertex_order = shader->info.TessEval.VertexOrder;
2014       }
2015 
2016       if (shader->info.TessEval.PointMode != -1) {
2017          if (point_mode != -1 &&
2018              point_mode != shader->info.TessEval.PointMode) {
2019             linker_error(prog, "tessellation evaluation shader defined with "
2020                          "conflicting point modes.\n");
2021             return;
2022          }
2023          point_mode = shader->info.TessEval.PointMode;
2024       }
2025 
2026    }
2027 
2028    /* Just do the intrastage -> interstage propagation right now,
2029     * since we already know we're in the right type of shader program
2030     * for doing it.
2031     */
2032    if (gl_prog->info.tess.primitive_mode == PRIM_UNKNOWN) {
2033       linker_error(prog,
2034                    "tessellation evaluation shader didn't declare input "
2035                    "primitive modes.\n");
2036       return;
2037    }
2038 
2039    if (gl_prog->info.tess.spacing == TESS_SPACING_UNSPECIFIED)
2040       gl_prog->info.tess.spacing = TESS_SPACING_EQUAL;
2041 
2042    if (vertex_order == 0 || vertex_order == GL_CCW)
2043       gl_prog->info.tess.ccw = true;
2044    else
2045       gl_prog->info.tess.ccw = false;
2046 
2047 
2048    if (point_mode == -1 || point_mode == GL_FALSE)
2049       gl_prog->info.tess.point_mode = false;
2050    else
2051       gl_prog->info.tess.point_mode = true;
2052 }
2053 
2054 
2055 /**
2056  * Performs the cross-validation of layout qualifiers specified in
2057  * redeclaration of gl_FragCoord for the attached fragment shaders,
2058  * and propagates them to the linked FS and linked shader program.
2059  */
2060 static void
2061 link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
2062                                 struct gl_linked_shader *linked_shader,
2063                                 struct gl_shader **shader_list,
2064                                 unsigned num_shaders)
2065 {
2066    bool redeclares_gl_fragcoord = false;
2067    bool uses_gl_fragcoord = false;
2068    bool origin_upper_left = false;
2069    bool pixel_center_integer = false;
2070 
2071    if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
2072        (prog->data->Version < 150 &&
2073         !prog->ARB_fragment_coord_conventions_enable))
2074       return;
2075 
2076    for (unsigned i = 0; i < num_shaders; i++) {
2077       struct gl_shader *shader = shader_list[i];
2078       /* From the GLSL 1.50 spec, page 39:
2079        *
2080        *   "If gl_FragCoord is redeclared in any fragment shader in a program,
2081        *    it must be redeclared in all the fragment shaders in that program
2082        *    that have a static use gl_FragCoord."
2083        */
2084       if ((redeclares_gl_fragcoord && !shader->redeclares_gl_fragcoord &&
2085            shader->uses_gl_fragcoord)
2086           || (shader->redeclares_gl_fragcoord && !redeclares_gl_fragcoord &&
2087               uses_gl_fragcoord)) {
2088          linker_error(prog, "fragment shader defined with conflicting "
2089                       "layout qualifiers for gl_FragCoord\n");
2090       }
2091 
2092       /* From the GLSL 1.50 spec, page 39:
2093        *
2094        *   "All redeclarations of gl_FragCoord in all fragment shaders in a
2095        *    single program must have the same set of qualifiers."
2096        */
2097       if (redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord &&
2098           (shader->origin_upper_left != origin_upper_left ||
2099            shader->pixel_center_integer != pixel_center_integer)) {
2100          linker_error(prog, "fragment shader defined with conflicting "
2101                       "layout qualifiers for gl_FragCoord\n");
2102       }
2103 
2104       /* Update the linked shader state.  Note that uses_gl_fragcoord should
2105        * accumulate the results.  The other values should replace.  If there
2106        * are multiple redeclarations, all the fields except uses_gl_fragcoord
2107        * are already known to be the same.
2108        */
2109       if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
2110          redeclares_gl_fragcoord = shader->redeclares_gl_fragcoord;
2111          uses_gl_fragcoord |= shader->uses_gl_fragcoord;
2112          origin_upper_left = shader->origin_upper_left;
2113          pixel_center_integer = shader->pixel_center_integer;
2114       }
2115 
2116       linked_shader->Program->info.fs.early_fragment_tests |=
2117          shader->EarlyFragmentTests || shader->PostDepthCoverage;
2118       linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
2119       linked_shader->Program->info.fs.post_depth_coverage |=
2120          shader->PostDepthCoverage;
2121       linked_shader->Program->info.fs.pixel_interlock_ordered |=
2122          shader->PixelInterlockOrdered;
2123       linked_shader->Program->info.fs.pixel_interlock_unordered |=
2124          shader->PixelInterlockUnordered;
2125       linked_shader->Program->info.fs.sample_interlock_ordered |=
2126          shader->SampleInterlockOrdered;
2127       linked_shader->Program->info.fs.sample_interlock_unordered |=
2128          shader->SampleInterlockUnordered;
2129       linked_shader->Program->info.fs.advanced_blend_modes |= shader->BlendSupport;
2130    }
2131 
2132    linked_shader->Program->info.fs.pixel_center_integer = pixel_center_integer;
2133    linked_shader->Program->info.fs.origin_upper_left = origin_upper_left;
2134 }
2135 
2136 /**
2137  * Performs the cross-validation of geometry shader max_vertices and
2138  * primitive type layout qualifiers for the attached geometry shaders,
2139  * and propagates them to the linked GS and linked shader program.
2140  */
2141 static void
2142 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
2143                                 struct gl_program *gl_prog,
2144                                 struct gl_shader **shader_list,
2145                                 unsigned num_shaders)
2146 {
2147    /* No in/out qualifiers defined for anything but GLSL 1.50+
2148     * geometry shaders so far.
2149     */
2150    if (gl_prog->info.stage != MESA_SHADER_GEOMETRY ||
2151        prog->data->Version < 150)
2152       return;
2153 
2154    int vertices_out = -1;
2155 
2156    gl_prog->info.gs.invocations = 0;
2157    gl_prog->info.gs.input_primitive = PRIM_UNKNOWN;
2158    gl_prog->info.gs.output_primitive = PRIM_UNKNOWN;
2159 
2160    /* From the GLSL 1.50 spec, page 46:
2161     *
2162     *     "All geometry shader output layout declarations in a program
2163     *      must declare the same layout and same value for
2164     *      max_vertices. There must be at least one geometry output
2165     *      layout declaration somewhere in a program, but not all
2166     *      geometry shaders (compilation units) are required to
2167     *      declare it."
2168     */
2169 
2170    for (unsigned i = 0; i < num_shaders; i++) {
2171       struct gl_shader *shader = shader_list[i];
2172 
2173       if (shader->info.Geom.InputType != PRIM_UNKNOWN) {
2174          if (gl_prog->info.gs.input_primitive != PRIM_UNKNOWN &&
2175              gl_prog->info.gs.input_primitive !=
2176              shader->info.Geom.InputType) {
2177             linker_error(prog, "geometry shader defined with conflicting "
2178                          "input types\n");
2179             return;
2180          }
2181          gl_prog->info.gs.input_primitive = shader->info.Geom.InputType;
2182       }
2183 
2184       if (shader->info.Geom.OutputType != PRIM_UNKNOWN) {
2185          if (gl_prog->info.gs.output_primitive != PRIM_UNKNOWN &&
2186              gl_prog->info.gs.output_primitive !=
2187              shader->info.Geom.OutputType) {
2188             linker_error(prog, "geometry shader defined with conflicting "
2189                          "output types\n");
2190             return;
2191          }
2192          gl_prog->info.gs.output_primitive = shader->info.Geom.OutputType;
2193       }
2194 
2195       if (shader->info.Geom.VerticesOut != -1) {
2196          if (vertices_out != -1 &&
2197              vertices_out != shader->info.Geom.VerticesOut) {
2198             linker_error(prog, "geometry shader defined with conflicting "
2199                          "output vertex count (%d and %d)\n",
2200                          vertices_out, shader->info.Geom.VerticesOut);
2201             return;
2202          }
2203          vertices_out = shader->info.Geom.VerticesOut;
2204       }
2205 
2206       if (shader->info.Geom.Invocations != 0) {
2207          if (gl_prog->info.gs.invocations != 0 &&
2208              gl_prog->info.gs.invocations !=
2209              (unsigned) shader->info.Geom.Invocations) {
2210             linker_error(prog, "geometry shader defined with conflicting "
2211                          "invocation count (%d and %d)\n",
2212                          gl_prog->info.gs.invocations,
2213                          shader->info.Geom.Invocations);
2214             return;
2215          }
2216          gl_prog->info.gs.invocations = shader->info.Geom.Invocations;
2217       }
2218    }
2219 
2220    /* Just do the intrastage -> interstage propagation right now,
2221     * since we already know we're in the right type of shader program
2222     * for doing it.
2223     */
2224    if (gl_prog->info.gs.input_primitive == PRIM_UNKNOWN) {
2225       linker_error(prog,
2226                    "geometry shader didn't declare primitive input type\n");
2227       return;
2228    }
2229 
2230    if (gl_prog->info.gs.output_primitive == PRIM_UNKNOWN) {
2231       linker_error(prog,
2232                    "geometry shader didn't declare primitive output type\n");
2233       return;
2234    }
2235 
2236    if (vertices_out == -1) {
2237       linker_error(prog,
2238                    "geometry shader didn't declare max_vertices\n");
2239       return;
2240    } else {
2241       gl_prog->info.gs.vertices_out = vertices_out;
2242    }
2243 
2244    if (gl_prog->info.gs.invocations == 0)
2245       gl_prog->info.gs.invocations = 1;
2246 }
2247 
2248 
2249 /**
2250  * Perform cross-validation of compute shader local_size_{x,y,z} layout and
2251  * derivative arrangement qualifiers for the attached compute shaders, and
2252  * propagate them to the linked CS and linked shader program.
2253  */
2254 static void
2255 link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
2256                                 struct gl_program *gl_prog,
2257                                 struct gl_shader **shader_list,
2258                                 unsigned num_shaders)
2259 {
2260    /* This function is called for all shader stages, but it only has an effect
2261     * for compute shaders.
2262     */
2263    if (gl_prog->info.stage != MESA_SHADER_COMPUTE)
2264       return;
2265 
2266    for (int i = 0; i < 3; i++)
2267       gl_prog->info.workgroup_size[i] = 0;
2268 
2269    gl_prog->info.workgroup_size_variable = false;
2270 
2271    gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
2272 
2273    /* From the ARB_compute_shader spec, in the section describing local size
2274     * declarations:
2275     *
2276     *     If multiple compute shaders attached to a single program object
2277     *     declare local work-group size, the declarations must be identical;
2278     *     otherwise a link-time error results. Furthermore, if a program
2279     *     object contains any compute shaders, at least one must contain an
2280     *     input layout qualifier specifying the local work sizes of the
2281     *     program, or a link-time error will occur.
2282     */
2283    for (unsigned sh = 0; sh < num_shaders; sh++) {
2284       struct gl_shader *shader = shader_list[sh];
2285 
2286       if (shader->info.Comp.LocalSize[0] != 0) {
2287          if (gl_prog->info.workgroup_size[0] != 0) {
2288             for (int i = 0; i < 3; i++) {
2289                if (gl_prog->info.workgroup_size[i] !=
2290                    shader->info.Comp.LocalSize[i]) {
2291                   linker_error(prog, "compute shader defined with conflicting "
2292                                "local sizes\n");
2293                   return;
2294                }
2295             }
2296          }
2297          for (int i = 0; i < 3; i++) {
2298             gl_prog->info.workgroup_size[i] =
2299                shader->info.Comp.LocalSize[i];
2300          }
2301       } else if (shader->info.Comp.LocalSizeVariable) {
2302          if (gl_prog->info.workgroup_size[0] != 0) {
2303             /* The ARB_compute_variable_group_size spec says:
2304              *
2305              *     If one compute shader attached to a program declares a
2306              *     variable local group size and a second compute shader
2307              *     attached to the same program declares a fixed local group
2308              *     size, a link-time error results.
2309              */
2310             linker_error(prog, "compute shader defined with both fixed and "
2311                          "variable local group size\n");
2312             return;
2313          }
2314          gl_prog->info.workgroup_size_variable = true;
2315       }
2316 
2317       enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
2318       if (group != DERIVATIVE_GROUP_NONE) {
2319          if (gl_prog->info.cs.derivative_group != DERIVATIVE_GROUP_NONE &&
2320              gl_prog->info.cs.derivative_group != group) {
2321             linker_error(prog, "compute shader defined with conflicting "
2322                          "derivative groups\n");
2323             return;
2324          }
2325          gl_prog->info.cs.derivative_group = group;
2326       }
2327    }
2328 
2329    /* Just do the intrastage -> interstage propagation right now,
2330     * since we already know we're in the right type of shader program
2331     * for doing it.
2332     */
2333    if (gl_prog->info.workgroup_size[0] == 0 &&
2334        !gl_prog->info.workgroup_size_variable) {
2335       linker_error(prog, "compute shader must contain a fixed or a variable "
2336                          "local group size\n");
2337       return;
2338    }
2339 
2340    if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
2341       if (gl_prog->info.workgroup_size[0] % 2 != 0) {
2342          linker_error(prog, "derivative_group_quadsNV must be used with a "
2343                       "local group size whose first dimension "
2344                       "is a multiple of 2\n");
2345          return;
2346       }
2347       if (gl_prog->info.workgroup_size[1] % 2 != 0) {
2348          linker_error(prog, "derivative_group_quadsNV must be used with a local "
2349                       "group size whose second dimension "
2350                       "is a multiple of 2\n");
2351          return;
2352       }
2353    } else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
2354       if ((gl_prog->info.workgroup_size[0] *
2355            gl_prog->info.workgroup_size[1] *
2356            gl_prog->info.workgroup_size[2]) % 4 != 0) {
2357          linker_error(prog, "derivative_group_linearNV must be used with a "
2358                       "local group size whose total number of invocations "
2359                       "is a multiple of 4\n");
2360          return;
2361       }
2362    }
2363 }
2364 
2365 /**
2366  * Link all out variables of a single stage that are declared in shader
2367  * objects which do not define the main function.
2368  */
2369 static void
2370 link_output_variables(struct gl_linked_shader *linked_shader,
2371                       struct gl_shader **shader_list,
2372                       unsigned num_shaders)
2373 {
2374    struct glsl_symbol_table *symbols = linked_shader->symbols;
2375 
2376    for (unsigned i = 0; i < num_shaders; i++) {
2377 
2378       /* Skip shader object with main function */
2379       if (shader_list[i]->symbols->get_function("main"))
2380          continue;
2381 
2382       foreach_in_list(ir_instruction, ir, shader_list[i]->ir) {
2383          if (ir->ir_type != ir_type_variable)
2384             continue;
2385 
2386          ir_variable *var = (ir_variable *) ir;
2387 
2388          if (var->data.mode == ir_var_shader_out &&
2389                !symbols->get_variable(var->name)) {
2390             var = var->clone(linked_shader, NULL);
2391             symbols->add_variable(var);
2392             linked_shader->ir->push_head(var);
2393          }
2394       }
2395    }
2396 
2397    return;
2398 }
2399 
2400 
2401 /**
2402  * Combine a group of shaders for a single stage to generate a linked shader
2403  *
2404  * \note
2405  * If this function is supplied a single shader, it is cloned, and the new
2406  * shader is returned.
2407  */
2408 struct gl_linked_shader *
2409 link_intrastage_shaders(void *mem_ctx,
2410                         struct gl_context *ctx,
2411                         struct gl_shader_program *prog,
2412                         struct gl_shader **shader_list,
2413                         unsigned num_shaders,
2414                         bool allow_missing_main)
2415 {
2416    struct gl_uniform_block *ubo_blocks = NULL;
2417    struct gl_uniform_block *ssbo_blocks = NULL;
2418    unsigned num_ubo_blocks = 0;
2419    unsigned num_ssbo_blocks = 0;
2420 
2421    /* Check that global variables defined in multiple shaders are consistent.
2422     */
2423    glsl_symbol_table variables;
2424    for (unsigned i = 0; i < num_shaders; i++) {
2425       if (shader_list[i] == NULL)
2426          continue;
2427       cross_validate_globals(ctx, prog, shader_list[i]->ir, &variables,
2428                              false);
2429    }
2430 
2431    if (!prog->data->LinkStatus)
2432       return NULL;
2433 
2434    /* Check that interface blocks defined in multiple shaders are consistent.
2435     */
2436    validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
2437                                         num_shaders);
2438    if (!prog->data->LinkStatus)
2439       return NULL;
2440 
2441    /* Check that there is only a single definition of each function signature
2442     * across all shaders.
2443     */
2444    for (unsigned i = 0; i < (num_shaders - 1); i++) {
2445       foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
2446          ir_function *const f = node->as_function();
2447 
2448          if (f == NULL)
2449             continue;
2450 
2451          for (unsigned j = i + 1; j < num_shaders; j++) {
2452             ir_function *const other =
2453                shader_list[j]->symbols->get_function(f->name);
2454 
2455             /* If the other shader has no function (and therefore no function
2456              * signatures) with the same name, skip to the next shader.
2457              */
2458             if (other == NULL)
2459                continue;
2460 
2461             foreach_in_list(ir_function_signature, sig, &f->signatures) {
2462                if (!sig->is_defined)
2463                   continue;
2464 
2465                ir_function_signature *other_sig =
2466                   other->exact_matching_signature(NULL, &sig->parameters);
2467 
2468                if (other_sig != NULL && other_sig->is_defined) {
2469                   linker_error(prog, "function `%s' is multiply defined\n",
2470                                f->name);
2471                   return NULL;
2472                }
2473             }
2474          }
2475       }
2476    }
2477 
2478    /* Find the shader that defines main, and make a clone of it.
2479     *
2480     * Starting with the clone, search for undefined references.  If one is
2481     * found, find the shader that defines it.  Clone the reference and add
2482     * it to the shader.  Repeat until there are no undefined references or
2483     * until a reference cannot be resolved.
2484     */
2485    gl_shader *main = NULL;
2486    for (unsigned i = 0; i < num_shaders; i++) {
2487       if (_mesa_get_main_function_signature(shader_list[i]->symbols)) {
2488          main = shader_list[i];
2489          break;
2490       }
2491    }
2492 
2493    if (main == NULL && allow_missing_main)
2494       main = shader_list[0];
2495 
2496    if (main == NULL) {
2497       linker_error(prog, "%s shader lacks `main'\n",
2498                    _mesa_shader_stage_to_string(shader_list[0]->Stage));
2499       return NULL;
2500    }
2501 
2502    gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
2503    linked->Stage = shader_list[0]->Stage;
2504 
2505    /* Create program and attach it to the linked shader */
2506    struct gl_program *gl_prog =
2507       ctx->Driver.NewProgram(ctx, shader_list[0]->Stage, prog->Name, false);
2508    if (!gl_prog) {
2509       prog->data->LinkStatus = LINKING_FAILURE;
2510       _mesa_delete_linked_shader(ctx, linked);
2511       return NULL;
2512    }
2513 
2514    _mesa_reference_shader_program_data(ctx, &gl_prog->sh.data, prog->data);
2515 
2516    /* Don't use _mesa_reference_program(); just take ownership. */
2517    linked->Program = gl_prog;
2518 
2519    linked->ir = new(linked) exec_list;
2520    clone_ir_list(mem_ctx, linked->ir, main->ir);
2521 
2522    link_fs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
2523    link_tcs_out_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2524    link_tes_in_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2525    link_gs_inout_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2526    link_cs_input_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2527 
2528    if (linked->Stage != MESA_SHADER_FRAGMENT)
2529       link_xfb_stride_layout_qualifiers(ctx, prog, shader_list, num_shaders);
2530 
2531    link_bindless_layout_qualifiers(prog, shader_list, num_shaders);
2532 
2533    link_layer_viewport_relative_qualifier(prog, gl_prog, shader_list, num_shaders);
2534 
2535    populate_symbol_table(linked, shader_list[0]->symbols);
2536 
2537    /* The pointer to the main function in the final linked shader (i.e., the
2538     * copy of the original shader that contained the main function).
2539     */
2540    ir_function_signature *const main_sig =
2541       _mesa_get_main_function_signature(linked->symbols);
2542 
2543    /* Move any instructions other than variable declarations or function
2544     * declarations into main.
2545     */
2546    if (main_sig != NULL) {
2547       exec_node *insertion_point =
2548          move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
2549                                linked);
2550 
2551       for (unsigned i = 0; i < num_shaders; i++) {
2552          if (shader_list[i] == main)
2553             continue;
2554 
2555          insertion_point = move_non_declarations(shader_list[i]->ir,
2556                                                  insertion_point, true, linked);
2557       }
2558    }
2559 
2560    if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
2561       _mesa_delete_linked_shader(ctx, linked);
2562       return NULL;
2563    }
2564 
2565    if (linked->Stage != MESA_SHADER_FRAGMENT)
2566       link_output_variables(linked, shader_list, num_shaders);
2567 
2568    /* Make a pass over all variable declarations to ensure that arrays with
2569     * unspecified sizes have a size specified.  The size is inferred from the
2570     * max_array_access field.
2571     */
2572    array_sizing_visitor v;
2573    v.run(linked->ir);
2574    v.fixup_unnamed_interface_types();
2575 
2576    /* Now that we know the sizes of all the arrays, we can replace .length()
2577     * calls with a constant expression.
2578     */
2579    array_length_to_const_visitor len_v;
2580    len_v.run(linked->ir);
2581 
2582    /* Link up uniform blocks defined within this stage. */
2583    link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
2584                        &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
2585 
2586    const unsigned max_uniform_blocks =
2587       ctx->Const.Program[linked->Stage].MaxUniformBlocks;
2588    if (num_ubo_blocks > max_uniform_blocks) {
2589       linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
2590                    _mesa_shader_stage_to_string(linked->Stage),
2591                    num_ubo_blocks, max_uniform_blocks);
2592    }
2593 
2594    const unsigned max_shader_storage_blocks =
2595       ctx->Const.Program[linked->Stage].MaxShaderStorageBlocks;
2596    if (num_ssbo_blocks > max_shader_storage_blocks) {
2597       linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
2598                    _mesa_shader_stage_to_string(linked->Stage),
2599                    num_ssbo_blocks, max_shader_storage_blocks);
2600    }
2601 
2602    if (!prog->data->LinkStatus) {
2603       _mesa_delete_linked_shader(ctx, linked);
2604       return NULL;
2605    }
2606 
2607    /* Copy ubo blocks to linked shader list */
2608    linked->Program->sh.UniformBlocks =
2609       ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
2610    ralloc_steal(linked, ubo_blocks);
2611    for (unsigned i = 0; i < num_ubo_blocks; i++) {
2612       linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
2613    }
2614    linked->Program->sh.NumUniformBlocks = num_ubo_blocks;
2615    linked->Program->info.num_ubos = num_ubo_blocks;
2616 
2617    /* Copy ssbo blocks to linked shader list */
2618    linked->Program->sh.ShaderStorageBlocks =
2619       ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
2620    ralloc_steal(linked, ssbo_blocks);
2621    for (unsigned i = 0; i < num_ssbo_blocks; i++) {
2622       linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
2623    }
2624    linked->Program->info.num_ssbos = num_ssbo_blocks;
2625 
2626    /* At this point linked should contain all of the linked IR, so
2627     * validate it to make sure nothing went wrong.
2628     */
2629    validate_ir_tree(linked->ir);
2630 
2631    /* Set the size of geometry shader input arrays */
2632    if (linked->Stage == MESA_SHADER_GEOMETRY) {
2633       unsigned num_vertices =
2634          vertices_per_prim(gl_prog->info.gs.input_primitive);
2635       array_resize_visitor input_resize_visitor(num_vertices, prog,
2636                                                 MESA_SHADER_GEOMETRY);
2637       foreach_in_list(ir_instruction, ir, linked->ir) {
2638          ir->accept(&input_resize_visitor);
2639       }
2640    }
2641 
2642    if (ctx->Const.VertexID_is_zero_based)
2643       lower_vertex_id(linked);
2644 
2645    if (ctx->Const.LowerCsDerivedVariables)
2646       lower_cs_derived(linked);
2647 
2648 #ifdef DEBUG
2649    /* Compute the source checksum. */
2650    linked->SourceChecksum = 0;
2651    for (unsigned i = 0; i < num_shaders; i++) {
2652       if (shader_list[i] == NULL)
2653          continue;
2654       linked->SourceChecksum ^= shader_list[i]->SourceChecksum;
2655    }
2656 #endif
2657 
2658    return linked;
2659 }
2660 
2661 /**
2662  * Update the sizes of linked shader uniform arrays to the maximum
2663  * array index used.
2664  *
2665  * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
2666  *
2667  *     If one or more elements of an array are active,
2668  *     GetActiveUniform will return the name of the array in name,
2669  *     subject to the restrictions listed above. The type of the array
2670  *     is returned in type. The size parameter contains the highest
2671  *     array element index used, plus one. The compiler or linker
2672  *     determines the highest index used.  There will be only one
2673  *     active uniform reported by the GL per uniform array.
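 *
 *     As an illustrative example (not part of the quoted spec text), a
 *     uniform declared as "uniform float u[8]" whose highest statically used
 *     element in any linked stage is u[3] is reported with a size of 4, and
 *     this function shrinks its type to float[4] accordingly.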
2674  *
2675  */
2676 static void
2677 update_array_sizes(struct gl_shader_program *prog)
2678 {
2679    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
2680       if (prog->_LinkedShaders[i] == NULL)
2681          continue;
2682 
2683       bool types_were_updated = false;
2684 
2685       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
2686          ir_variable *const var = node->as_variable();
2687 
2688          if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
2689              !var->type->is_array())
2690             continue;
2691 
2692          /* GL_ARB_uniform_buffer_object says that std140 uniforms
2693           * will not be eliminated.  Since we always do std140, just
2694           * don't resize arrays in UBOs.
2695           *
2696           * Atomic counters are supposed to get deterministic
2697           * locations assigned based on the declaration ordering and
2698           * sizes; array compaction would mess that up.
2699           *
2700           * Subroutine uniforms are not removed.
2701           */
2702          if (var->is_in_buffer_block() || var->type->contains_atomic() ||
2703              var->type->contains_subroutine() || var->constant_initializer)
2704             continue;
2705 
2706          int size = var->data.max_array_access;
2707          for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
2708             if (prog->_LinkedShaders[j] == NULL)
2709                continue;
2710 
2711             foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) {
2712                ir_variable *other_var = node2->as_variable();
2713                if (!other_var)
2714                   continue;
2715 
2716                if (strcmp(var->name, other_var->name) == 0 &&
2717                    other_var->data.max_array_access > size) {
2718                   size = other_var->data.max_array_access;
2719                }
2720             }
2721          }
2722 
2723          if (size + 1 != (int)var->type->length) {
2724             /* If this is a built-in uniform (i.e., it's backed by some
2725              * fixed-function state), adjust the number of state slots to
2726              * match the new array size.  The number of slots per array entry
2727              * is not known.  It seems safe to assume that the total number of
2728              * slots is an integer multiple of the number of array elements.
2729              * Determine the number of slots per array element by dividing by
2730              * the old (total) size.
2731              */
2732             const unsigned num_slots = var->get_num_state_slots();
2733             if (num_slots > 0) {
2734                var->set_num_state_slots((size + 1)
2735                                         * (num_slots / var->type->length));
2736             }
2737 
2738             var->type = glsl_type::get_array_instance(var->type->fields.array,
2739                                                       size + 1);
2740             types_were_updated = true;
2741          }
2742       }
2743 
2744       /* Update the types of dereferences in case we changed any. */
2745       if (types_were_updated) {
2746          deref_type_updater v;
2747          v.run(prog->_LinkedShaders[i]->ir);
2748       }
2749    }
2750 }
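
/* Illustrative sketch (not part of the linker): the resizing rule above in
 * isolation.  Given the highest array index accessed in each stage, the new
 * array length is the overall maximum index plus one.  The stage count and
 * input array are hypothetical stand-ins for the ir_variable bookkeeping.
 */
static inline unsigned
example_trimmed_array_length(const int *max_access_per_stage,
                             unsigned num_stages)
{
   int max_index = -1;

   for (unsigned i = 0; i < num_stages; i++) {
      if (max_access_per_stage[i] > max_index)
         max_index = max_access_per_stage[i];
   }

   /* e.g. accesses up to index 2 in the VS and index 5 in the FS resize the
    * uniform array to 6 elements.
    */
   return (unsigned) (max_index + 1);
}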
2751 
2752 /**
2753  * Resize tessellation evaluation per-vertex inputs to the size of
2754  * tessellation control per-vertex outputs.
2755  */
2756 static void
2757 resize_tes_inputs(struct gl_context *ctx,
2758                   struct gl_shader_program *prog)
2759 {
2760    if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
2761       return;
2762 
2763    gl_linked_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
2764    gl_linked_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
2765 
2766    /* If no control shader is present, then the TES inputs are statically
2767     * sized to MaxPatchVertices; the actual size of the arrays won't be
2768     * known until draw time.
2769     */
2770    const int num_vertices = tcs
2771       ? tcs->Program->info.tess.tcs_vertices_out
2772       : ctx->Const.MaxPatchVertices;
2773 
2774    array_resize_visitor input_resize_visitor(num_vertices, prog,
2775                                              MESA_SHADER_TESS_EVAL);
2776    foreach_in_list(ir_instruction, ir, tes->ir) {
2777       ir->accept(&input_resize_visitor);
2778    }
2779 
2780    if (tcs) {
2781       /* Convert the gl_PatchVerticesIn system value into a constant, since
2782        * the value is known at this point.
2783        */
2784       foreach_in_list(ir_instruction, ir, tes->ir) {
2785          ir_variable *var = ir->as_variable();
2786          if (var && var->data.mode == ir_var_system_value &&
2787              var->data.location == SYSTEM_VALUE_VERTICES_IN) {
2788             void *mem_ctx = ralloc_parent(var);
2789             var->data.location = 0;
2790             var->data.explicit_location = false;
2791             var->data.mode = ir_var_auto;
2792             var->constant_value = new(mem_ctx) ir_constant(num_vertices);
2793          }
2794       }
2795    }
2796 }
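
/* Minimal sketch of the sizing rule above, with plain values standing in for
 * the Mesa structures: a present TCS pins the TES per-vertex input size to
 * its layout(vertices = N) declaration, otherwise the arrays stay at
 * MaxPatchVertices.  Parameter names are hypothetical.
 */
static inline int
example_tes_input_array_size(bool has_tcs, int tcs_vertices_out,
                             int max_patch_vertices)
{
   return has_tcs ? tcs_vertices_out : max_patch_vertices;
}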
2797 
2798 /**
2799  * Find a contiguous set of available bits in a bitmask.
2800  *
2801  * \param used_mask     Bits representing used (1) and unused (0) locations
2802  * \param needed_count  Number of contiguous bits needed.
2803  *
2804  * \return
2805  * Base location of the available bits on success or -1 on failure.
2806  */
2807 static int
2808 find_available_slots(unsigned used_mask, unsigned needed_count)
2809 {
2810    unsigned needed_mask = (1 << needed_count) - 1;
2811    const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
2812 
2813    /* The comparison to 32 is redundant, but without it GCC emits "warning:
2814     * cannot optimize possibly infinite loops" for the loop below.
2815     */
2816    if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
2817       return -1;
2818 
2819    for (int i = 0; i <= max_bit_to_test; i++) {
2820       if ((needed_mask & ~used_mask) == needed_mask)
2821          return i;
2822 
2823       needed_mask <<= 1;
2824    }
2825 
2826    return -1;
2827 }
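
/* Usage sketch for find_available_slots() with hypothetical values: with
 * locations 0-3 already in use, a variable needing three contiguous slots
 * lands at location 4, and the caller then marks locations 4-6 as used.
 */
static inline unsigned
example_place_three_slot_attribute(unsigned used_locations)
{
   const unsigned needed = 3;

   /* With used_locations == 0xf (locations 0-3 taken) this returns 4. */
   const int base = find_available_slots(used_locations, needed);
   if (base < 0)
      return used_locations;            /* no room; leave the mask unchanged */

   /* Mark the newly claimed slots, as the assignment loops in this file do. */
   return used_locations | (((1u << needed) - 1) << base);
}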
2828 
2829 
2830 #define SAFE_MASK_FROM_INDEX(i) (((i) >= 32) ? ~0 : ((1 << (i)) - 1))
2831 
2832 /**
2833  * Assign locations for either VS inputs or FS outputs.
2834  *
2835  * \param mem_ctx        Temporary ralloc context used for linking.
2836  * \param prog           Shader program whose variables need locations
2837  *                       assigned.
2838  * \param constants      Driver specific constant values for the program.
2839  * \param target_index   Selector for the program target to receive location
2840  *                       assignments.  Must be either \c MESA_SHADER_VERTEX or
2841  *                       \c MESA_SHADER_FRAGMENT.
2842  * \param do_assignment  Whether to actually record the assignments or just
2843  *                       perform a dry-run check.
2844  *
2845  * \return
2846  * If locations are (or, in the dry-run case, could be) successfully assigned,
2847  * true is returned.  Otherwise an error is emitted to the shader link log and
2848  * false is returned.
2849  */
2850 static bool
2851 assign_attribute_or_color_locations(void *mem_ctx,
2852                                     gl_shader_program *prog,
2853                                     struct gl_constants *constants,
2854                                     unsigned target_index,
2855                                     bool do_assignment)
2856 {
2857    /* Maximum number of generic locations.  This corresponds to either the
2858     * maximum number of draw buffers or the maximum number of generic
2859     * attributes.
2860     */
2861    unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
2862       constants->Program[target_index].MaxAttribs :
2863       MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
2864 
2865    /* Mark invalid locations as being used.
2866     */
2867    unsigned used_locations = ~SAFE_MASK_FROM_INDEX(max_index);
2868    unsigned double_storage_locations = 0;
2869 
2870    assert((target_index == MESA_SHADER_VERTEX)
2871           || (target_index == MESA_SHADER_FRAGMENT));
2872 
2873    gl_linked_shader *const sh = prog->_LinkedShaders[target_index];
2874    if (sh == NULL)
2875       return true;
2876 
2877    /* Operate in a total of four passes.
2878     *
2879     * 1. Invalidate the location assignments for all vertex shader inputs.
2880     *
2881     * 2. Assign locations for inputs that have user-defined (via
2882     *    glBindVertexAttribLocation) locations and outputs that have
2883     *    user-defined locations (via glBindFragDataLocation).
2884     *
2885     * 3. Sort the attributes without assigned locations by number of slots
2886     *    required in decreasing order.  Fragmentation caused by attribute
2887     *    locations assigned by the application may prevent large attributes
2888     *    from having enough contiguous space.
2889     *
2890     * 4. Assign locations to any inputs without assigned locations.
2891     */
2892 
2893    const int generic_base = (target_index == MESA_SHADER_VERTEX)
2894       ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
2895 
2896    const enum ir_variable_mode direction =
2897       (target_index == MESA_SHADER_VERTEX)
2898       ? ir_var_shader_in : ir_var_shader_out;
2899 
2900 
2901    /* Temporary storage for the set of attributes that need locations assigned.
2902     */
2903    struct temp_attr {
2904       unsigned slots;
2905       ir_variable *var;
2906 
2907       /* Used below in the call to qsort. */
2908       static int compare(const void *a, const void *b)
2909       {
2910          const temp_attr *const l = (const temp_attr *) a;
2911          const temp_attr *const r = (const temp_attr *) b;
2912 
2913          /* Reversed because we want a descending order sort below. */
2914          return r->slots - l->slots;
2915       }
2916    } to_assign[32];
2917    assert(max_index <= 32);
2918 
2919    /* Temporary array for the set of attributes that have locations assigned,
2920     * for the purpose of checking overlapping slots/components of (non-ES)
2921     * fragment shader outputs.
2922     */
2923    ir_variable *assigned[12 * 4]; /* (max # of FS outputs) * # components */
2924    unsigned assigned_attr = 0;
2925 
2926    unsigned num_attr = 0;
2927 
2928    foreach_in_list(ir_instruction, node, sh->ir) {
2929       ir_variable *const var = node->as_variable();
2930 
2931       if ((var == NULL) || (var->data.mode != (unsigned) direction))
2932          continue;
2933 
2934       if (var->data.explicit_location) {
2935          var->data.is_unmatched_generic_inout = 0;
2936          if ((var->data.location >= (int)(max_index + generic_base))
2937              || (var->data.location < 0)) {
2938             linker_error(prog,
2939                          "invalid explicit location %d specified for `%s'\n",
2940                          (var->data.location < 0)
2941                          ? var->data.location
2942                          : var->data.location - generic_base,
2943                          var->name);
2944             return false;
2945          }
2946       } else if (target_index == MESA_SHADER_VERTEX) {
2947          unsigned binding;
2948 
2949          if (prog->AttributeBindings->get(binding, var->name)) {
2950             assert(binding >= VERT_ATTRIB_GENERIC0);
2951             var->data.location = binding;
2952             var->data.is_unmatched_generic_inout = 0;
2953          }
2954       } else if (target_index == MESA_SHADER_FRAGMENT) {
2955          unsigned binding;
2956          unsigned index;
2957          const char *name = var->name;
2958          const glsl_type *type = var->type;
2959 
2960          while (type) {
2961             /* Check if there's a binding for the variable name */
2962             if (prog->FragDataBindings->get(binding, name)) {
2963                assert(binding >= FRAG_RESULT_DATA0);
2964                var->data.location = binding;
2965                var->data.is_unmatched_generic_inout = 0;
2966 
2967                if (prog->FragDataIndexBindings->get(index, name)) {
2968                   var->data.index = index;
2969                }
2970                break;
2971             }
2972 
2973             /* If not, but it's an array type, look for name[0] */
2974             if (type->is_array()) {
2975                name = ralloc_asprintf(mem_ctx, "%s[0]", name);
2976                type = type->fields.array;
2977                continue;
2978             }
2979 
2980             break;
2981          }
2982       }
2983 
2984       if (strcmp(var->name, "gl_LastFragData") == 0)
2985          continue;
2986 
2987       /* From GL4.5 core spec, section 15.2 (Shader Execution):
2988        *
2989        *     "Output binding assignments will cause LinkProgram to fail:
2990        *     ...
2991        *     If the program has an active output assigned to a location greater
2992        *     than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
2993        *     an active output assigned an index greater than or equal to one;"
2994        */
2995       if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
2996           var->data.location - generic_base >=
2997           (int) constants->MaxDualSourceDrawBuffers) {
2998          linker_error(prog,
2999                       "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
3000                       "with index %u for %s\n",
3001                       var->data.location - generic_base, var->data.index,
3002                       var->name);
3003          return false;
3004       }
3005 
3006       const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
3007 
3008       /* If the variable is not a built-in and has a location statically
3009        * assigned in the shader (presumably via a layout qualifier), make sure
3010        * that it doesn't collide with other assigned locations.  Otherwise,
3011        * add it to the list of variables that need linker-assigned locations.
3012        */
3013       if (var->data.location != -1) {
3014          if (var->data.location >= generic_base && var->data.index < 1) {
3015             /* From page 61 of the OpenGL 4.0 spec:
3016              *
3017              *     "LinkProgram will fail if the attribute bindings assigned
3018              *     by BindAttribLocation do not leave not enough space to
3019              *     assign a location for an active matrix attribute or an
3020              *     active attribute array, both of which require multiple
3021              *     contiguous generic attributes."
3022              *
3023              * I think the above text prohibits aliasing between explicit and
3024              * automatic assignments.  However, aliasing is allowed in manual
3025              * assignments of attribute locations.  See the comments below for
3026              * the details.
3027              *
3028              * From OpenGL 4.0 spec, page 61:
3029              *
3030              *     "It is possible for an application to bind more than one
3031              *     attribute name to the same location. This is referred to as
3032              *     aliasing. This will only work if only one of the aliased
3033              *     attributes is active in the executable program, or if no
3034              *     path through the shader consumes more than one attribute of
3035              *     a set of attributes aliased to the same location. A link
3036              *     error can occur if the linker determines that every path
3037              *     through the shader consumes multiple aliased attributes,
3038              *     but implementations are not required to generate an error
3039              *     in this case."
3040              *
3041              * From GLSL 4.30 spec, page 54:
3042              *
3043              *    "A program will fail to link if any two non-vertex shader
3044              *     input variables are assigned to the same location. For
3045              *     vertex shaders, multiple input variables may be assigned
3046              *     to the same location using either layout qualifiers or via
3047              *     the OpenGL API. However, such aliasing is intended only to
3048              *     support vertex shaders where each execution path accesses
3049              *     at most one input per each location. Implementations are
3050              *     permitted, but not required, to generate link-time errors
3051              *     if they detect that every path through the vertex shader
3052              *     executable accesses multiple inputs assigned to any single
3053              *     location. For all shader types, a program will fail to link
3054              *     if explicit location assignments leave the linker unable
3055              *     to find space for other variables without explicit
3056              *     assignments."
3057              *
3058              * From OpenGL ES 3.0 spec, page 56:
3059              *
3060              *    "Binding more than one attribute name to the same location
3061              *     is referred to as aliasing, and is not permitted in OpenGL
3062              *     ES Shading Language 3.00 vertex shaders. LinkProgram will
3063              *     fail when this condition exists. However, aliasing is
3064              *     possible in OpenGL ES Shading Language 1.00 vertex shaders.
3065              *     This will only work if only one of the aliased attributes
3066              *     is active in the executable program, or if no path through
3067              *     the shader consumes more than one attribute of a set of
3068              *     attributes aliased to the same location. A link error can
3069              *     occur if the linker determines that every path through the
3070              *     shader consumes multiple aliased attributes, but implemen-
3071              *     tations are not required to generate an error in this case."
3072              *
3073              * After looking at above references from OpenGL, OpenGL ES and
3074              * GLSL specifications, we allow aliasing of vertex input variables
3075              * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
3076              *
3077              * NOTE: This is not required by the spec but it's worth mentioning
3078              * here that we're not doing anything to make sure that no path
3079              * through the vertex shader executable accesses multiple inputs
3080              * assigned to any single location.
3081              */
3082 
3083             /* Mask representing the contiguous slots that will be used by
3084              * this attribute.
3085              */
3086             const unsigned attr = var->data.location - generic_base;
3087             const unsigned use_mask = (1 << slots) - 1;
3088             const char *const string = (target_index == MESA_SHADER_VERTEX)
3089                ? "vertex shader input" : "fragment shader output";
3090 
3091             /* Generate a link error if the requested locations for this
3092              * attribute exceed the maximum allowed attribute location.
3093              */
3094             if (attr + slots > max_index) {
3095                linker_error(prog,
3096                            "insufficient contiguous locations "
3097                            "available for %s `%s' %d %d %d\n", string,
3098                            var->name, used_locations, use_mask, attr);
3099                return false;
3100             }
3101 
3102             /* Generate a link error if the set of bits requested for this
3103              * attribute overlaps any previously allocated bits.
3104              */
3105             if ((~(use_mask << attr) & used_locations) != used_locations) {
3106                if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
3107                   /* From section 4.4.2 (Output Layout Qualifiers) of the GLSL
3108                    * 4.40 spec:
3109                    *
3110                    *    "Additionally, for fragment shader outputs, if two
3111                    *    variables are placed within the same location, they
3112                    *    must have the same underlying type (floating-point or
3113                    *    integer). No component aliasing of output variables or
3114                    *    members is allowed."
3115                    */
3116                   for (unsigned i = 0; i < assigned_attr; i++) {
3117                      unsigned assigned_slots =
3118                         assigned[i]->type->count_attribute_slots(false);
3119                      unsigned assig_attr =
3120                         assigned[i]->data.location - generic_base;
3121                      unsigned assigned_use_mask = (1 << assigned_slots) - 1;
3122 
3123                      if ((assigned_use_mask << assig_attr) &
3124                          (use_mask << attr)) {
3125 
3126                         const glsl_type *assigned_type =
3127                            assigned[i]->type->without_array();
3128                         const glsl_type *type = var->type->without_array();
3129                         if (assigned_type->base_type != type->base_type) {
3130                            linker_error(prog, "types do not match for aliased"
3131                                         " %ss %s and %s\n", string,
3132                                         assigned[i]->name, var->name);
3133                            return false;
3134                         }
3135 
3136                         unsigned assigned_component_mask =
3137                            ((1 << assigned_type->vector_elements) - 1) <<
3138                            assigned[i]->data.location_frac;
3139                         unsigned component_mask =
3140                            ((1 << type->vector_elements) - 1) <<
3141                            var->data.location_frac;
3142                         if (assigned_component_mask & component_mask) {
3143                            linker_error(prog, "overlapping component is "
3144                                         "assigned to %ss %s and %s "
3145                                         "(component=%d)\n",
3146                                         string, assigned[i]->name, var->name,
3147                                         var->data.location_frac);
3148                            return false;
3149                         }
3150                      }
3151                   }
3152                } else if (target_index == MESA_SHADER_FRAGMENT ||
3153                           (prog->IsES && prog->data->Version >= 300)) {
3154                   linker_error(prog, "overlapping location is assigned "
3155                                "to %s `%s' %d %d %d\n", string, var->name,
3156                                used_locations, use_mask, attr);
3157                   return false;
3158                } else {
3159                   linker_warning(prog, "overlapping location is assigned "
3160                                  "to %s `%s' %d %d %d\n", string, var->name,
3161                                  used_locations, use_mask, attr);
3162                }
3163             }
3164 
3165             if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
3166                /* Only track assigned variables for non-ES fragment shaders
3167                 * to avoid overflowing the array.
3168                 *
3169                 * At most one variable per fragment output component should
3170                 * reach this.
3171                 */
3172                assert(assigned_attr < ARRAY_SIZE(assigned));
3173                assigned[assigned_attr] = var;
3174                assigned_attr++;
3175             }
3176 
3177             used_locations |= (use_mask << attr);
3178 
3179             /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
3180              *
3181              * "A program with more than the value of MAX_VERTEX_ATTRIBS
3182              *  active attribute variables may fail to link, unless
3183              *  device-dependent optimizations are able to make the program
3184              *  fit within available hardware resources. For the purposes
3185              *  of this test, attribute variables of the type dvec3, dvec4,
3186              *  dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
3187              *  count as consuming twice as many attributes as equivalent
3188              *  single-precision types. While these types use the same number
3189              *  of generic attributes as their single-precision equivalents,
3190              *  implementations are permitted to consume two single-precision
3191              *  vectors of internal storage for each three- or four-component
3192              *  double-precision vector."
3193              *
3194              * Mark this attribute slot as taking up twice as much space
3195              * so we can count it properly against limits.  According to
3196              * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
3197              * is optional behavior, but it seems preferable.
3198              */
3199             if (var->type->without_array()->is_dual_slot())
3200                double_storage_locations |= (use_mask << attr);
3201          }
3202 
3203          continue;
3204       }
3205 
3206       if (num_attr >= max_index) {
3207          linker_error(prog, "too many %s (max %u)",
3208                       target_index == MESA_SHADER_VERTEX ?
3209                       "vertex shader inputs" : "fragment shader outputs",
3210                       max_index);
3211          return false;
3212       }
3213       to_assign[num_attr].slots = slots;
3214       to_assign[num_attr].var = var;
3215       num_attr++;
3216    }
3217 
3218    if (!do_assignment)
3219       return true;
3220 
3221    if (target_index == MESA_SHADER_VERTEX) {
3222       unsigned total_attribs_size =
3223          util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3224          util_bitcount(double_storage_locations);
3225       if (total_attribs_size > max_index) {
3226          linker_error(prog,
3227                       "attempt to use %d vertex attribute slots, only %d available ",
3228                       total_attribs_size, max_index);
3229          return false;
3230       }
3231    }
3232 
3233    /* If all of the attributes were assigned locations by the application (or
3234     * are built-in attributes with fixed locations), return early.  This should
3235     * be the common case.
3236     */
3237    if (num_attr == 0)
3238       return true;
3239 
3240    qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
3241 
3242    if (target_index == MESA_SHADER_VERTEX) {
3243       /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can
3244        * only be explicitly assigned via glBindAttribLocation.  Mark it as
3245        * reserved to prevent it from being automatically allocated below.
3246        */
3247       find_deref_visitor find("gl_Vertex");
3248       find.run(sh->ir);
3249       if (find.variable_found())
3250          used_locations |= (1 << 0);
3251    }
3252 
3253    for (unsigned i = 0; i < num_attr; i++) {
3254       /* Mask representing the contiguous slots that will be used by this
3255        * attribute.
3256        */
3257       const unsigned use_mask = (1 << to_assign[i].slots) - 1;
3258 
3259       int location = find_available_slots(used_locations, to_assign[i].slots);
3260 
3261       if (location < 0) {
3262          const char *const string = (target_index == MESA_SHADER_VERTEX)
3263             ? "vertex shader input" : "fragment shader output";
3264 
3265          linker_error(prog,
3266                       "insufficient contiguous locations "
3267                       "available for %s `%s'\n",
3268                       string, to_assign[i].var->name);
3269          return false;
3270       }
3271 
3272       to_assign[i].var->data.location = generic_base + location;
3273       to_assign[i].var->data.is_unmatched_generic_inout = 0;
3274       used_locations |= (use_mask << location);
3275 
3276       if (to_assign[i].var->type->without_array()->is_dual_slot())
3277          double_storage_locations |= (use_mask << location);
3278    }
3279 
3280    /* Now that we have all the locations, from the GL 4.5 core spec, section
3281     * 11.1.1 (Vertex Attributes), dvec3, dvec4, dmat2x3, dmat2x4, dmat3,
3282     * dmat3x4, dmat4x3, and dmat4 count as consuming twice as many attributes
3283     * as equivalent single-precision types.
3284     */
3285    if (target_index == MESA_SHADER_VERTEX) {
3286       unsigned total_attribs_size =
3287          util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3288          util_bitcount(double_storage_locations);
3289       if (total_attribs_size > max_index) {
3290          linker_error(prog,
3291                       "attempt to use %d vertex attribute slots, only %d available ",
3292                       total_attribs_size, max_index);
3293          return false;
3294       }
3295    }
3296 
3297    return true;
3298 }
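
/* Illustrative sketch (hypothetical helper, not used by the linker) of the
 * overlap test applied above to explicitly located variables: a variable
 * occupying `slots` contiguous locations starting at generic location `attr`
 * collides with earlier assignments exactly when its use mask intersects
 * used_locations.
 */
static inline bool
example_attribute_overlaps(unsigned used_locations, unsigned attr,
                           unsigned slots)
{
   const unsigned use_mask = (1u << slots) - 1;

   /* Same condition as the check above, rewritten as a direct intersection
    * test: (~(use_mask << attr) & used_locations) != used_locations holds
    * exactly when some already-used bit falls inside the new mask.
    */
   return ((use_mask << attr) & used_locations) != 0;
}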
3299 
3300 /**
3301  * Match explicit locations of outputs to inputs and clear the unmatched
3302  * flag when a match is found so we don't optimize them away.
3303  */
3304 static void
3305 match_explicit_outputs_to_inputs(gl_linked_shader *producer,
3306                                  gl_linked_shader *consumer)
3307 {
3308    glsl_symbol_table parameters;
3309    ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
3310       { {NULL, NULL} };
3311 
3312    /* Find all shader outputs in the "producer" stage.
3313     */
3314    foreach_in_list(ir_instruction, node, producer->ir) {
3315       ir_variable *const var = node->as_variable();
3316 
3317       if ((var == NULL) || (var->data.mode != ir_var_shader_out))
3318          continue;
3319 
3320       if (var->data.explicit_location &&
3321           var->data.location >= VARYING_SLOT_VAR0) {
3322          const unsigned idx = var->data.location - VARYING_SLOT_VAR0;
3323          if (explicit_locations[idx][var->data.location_frac] == NULL)
3324             explicit_locations[idx][var->data.location_frac] = var;
3325 
3326          /* Always match TCS outputs. They are shared by all invocations
3327           * within a patch and can be used as shared memory.
3328           */
3329          if (producer->Stage == MESA_SHADER_TESS_CTRL)
3330             var->data.is_unmatched_generic_inout = 0;
3331       }
3332    }
3333 
3334    /* Match inputs to outputs */
3335    foreach_in_list(ir_instruction, node, consumer->ir) {
3336       ir_variable *const input = node->as_variable();
3337 
3338       if ((input == NULL) || (input->data.mode != ir_var_shader_in))
3339          continue;
3340 
3341       ir_variable *output = NULL;
3342       if (input->data.explicit_location
3343           && input->data.location >= VARYING_SLOT_VAR0) {
3344          output = explicit_locations[input->data.location - VARYING_SLOT_VAR0]
3345             [input->data.location_frac];
3346 
3347          if (output != NULL) {
3348             input->data.is_unmatched_generic_inout = 0;
3349             output->data.is_unmatched_generic_inout = 0;
3350          }
3351       }
3352    }
3353 }
3354 
3355 /**
3356  * Store the gl_FragDepth layout in the gl_shader_program struct.
3357  */
3358 static void
3359 store_fragdepth_layout(struct gl_shader_program *prog)
3360 {
3361    if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
3362       return;
3363    }
3364 
3365    struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
3366 
3367    /* We don't look up the gl_FragDepth symbol directly because if
3368     * gl_FragDepth is not used in the shader, it's removed from the IR.
3369     * However, the symbol won't be removed from the symbol table.
3370     *
3371     * We're only interested in the cases where the variable is NOT removed
3372     * from the IR.
3373     */
3374    foreach_in_list(ir_instruction, node, ir) {
3375       ir_variable *const var = node->as_variable();
3376 
3377       if (var == NULL || var->data.mode != ir_var_shader_out) {
3378          continue;
3379       }
3380 
3381       if (strcmp(var->name, "gl_FragDepth") == 0) {
3382          switch (var->data.depth_layout) {
3383          case ir_depth_layout_none:
3384             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
3385             return;
3386          case ir_depth_layout_any:
3387             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
3388             return;
3389          case ir_depth_layout_greater:
3390             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
3391             return;
3392          case ir_depth_layout_less:
3393             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
3394             return;
3395          case ir_depth_layout_unchanged:
3396             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3397             return;
3398          default:
3399             assert(0);
3400             return;
3401          }
3402       }
3403    }
3404 }
3405 
3406 /**
3407  * Validate shader image resources.
3408  */
3409 static void
3410 check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog)
3411 {
3412    unsigned total_image_units = 0;
3413    unsigned fragment_outputs = 0;
3414    unsigned total_shader_storage_blocks = 0;
3415 
3416    if (!ctx->Extensions.ARB_shader_image_load_store)
3417       return;
3418 
3419    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3420       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
3421 
3422       if (sh) {
3423          total_image_units += sh->Program->info.num_images;
3424          total_shader_storage_blocks += sh->Program->info.num_ssbos;
3425 
3426          if (i == MESA_SHADER_FRAGMENT) {
3427             foreach_in_list(ir_instruction, node, sh->ir) {
3428                ir_variable *var = node->as_variable();
3429                if (var && var->data.mode == ir_var_shader_out)
3430                   /* since there are no double fs outputs - pass false */
3431                   fragment_outputs += var->type->count_attribute_slots(false);
3432             }
3433          }
3434       }
3435    }
3436 
3437    if (total_image_units > ctx->Const.MaxCombinedImageUniforms)
3438       linker_error(prog, "Too many combined image uniforms\n");
3439 
3440    if (total_image_units + fragment_outputs + total_shader_storage_blocks >
3441        ctx->Const.MaxCombinedShaderOutputResources)
3442       linker_error(prog, "Too many combined image uniforms, shader storage "
3443                          "buffers and fragment outputs\n");
3444 }
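
/* Minimal sketch of the combined-resource check above with plain counters:
 * image uniforms, shader storage blocks and fragment outputs all count
 * against the single MAX_COMBINED_SHADER_OUTPUT_RESOURCES pool.  Parameter
 * names are hypothetical.
 */
static inline bool
example_output_resources_fit(unsigned total_image_units,
                             unsigned total_shader_storage_blocks,
                             unsigned fragment_outputs,
                             unsigned max_combined_resources)
{
   return total_image_units + total_shader_storage_blocks + fragment_outputs <=
          max_combined_resources;
}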
3445 
3446 
3447 /**
3448  * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3449  * for a variable and checks for overlaps with other uniforms that use
3450  * explicit locations.
3451  */
3452 static int
3453 reserve_explicit_locations(struct gl_shader_program *prog,
3454                            string_to_uint_map *map, ir_variable *var)
3455 {
3456    unsigned slots = var->type->uniform_locations();
3457    unsigned max_loc = var->data.location + slots - 1;
3458    unsigned return_value = slots;
3459 
3460    /* Resize remap table if locations do not fit in the current one. */
3461    if (max_loc + 1 > prog->NumUniformRemapTable) {
3462       prog->UniformRemapTable =
3463          reralloc(prog, prog->UniformRemapTable,
3464                   gl_uniform_storage *,
3465                   max_loc + 1);
3466 
3467       if (!prog->UniformRemapTable) {
3468          linker_error(prog, "Out of memory during linking.\n");
3469          return -1;
3470       }
3471 
3472       /* Initialize allocated space. */
3473       for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
3474          prog->UniformRemapTable[i] = NULL;
3475 
3476       prog->NumUniformRemapTable = max_loc + 1;
3477    }
3478 
3479    for (unsigned i = 0; i < slots; i++) {
3480       unsigned loc = var->data.location + i;
3481 
3482       /* Check if location is already used. */
3483       if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3484 
3485          /* Possibly the same uniform from a different stage; this is OK. */
3486          unsigned hash_loc;
3487          if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
3488             return_value = 0;
3489             continue;
3490          }
3491 
3492          /* ARB_explicit_uniform_location specification states:
3493           *
3494           *     "No two default-block uniform variables in the program can have
3495           *     the same location, even if they are unused, otherwise a compiler
3496           *     or linker error will be generated."
3497           */
3498          linker_error(prog,
3499                       "location qualifier for uniform %s overlaps "
3500                       "previously used location\n",
3501                       var->name);
3502          return -1;
3503       }
3504 
3505       /* Initialize location as inactive before optimization
3506        * rounds and location assignment.
3507        */
3508       prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3509    }
3510 
3511    /* Note: the base location is used for arrays. */
3512    map->put(var->data.location, var->name);
3513 
3514    return return_value;
3515 }
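
/* Illustrative sketch of the reservation scheme above, with a plain array
 * standing in for UniformRemapTable: every slot covered by an explicit
 * location is pre-marked with a sentinel so a later declaration that lands
 * on the same slot can be rejected.  The table, its fixed size and the
 * sentinel value are hypothetical simplifications (the real code grows the
 * table and tolerates the same uniform re-appearing in another stage).
 */
static inline bool
example_reserve_explicit_slots(const void **table, unsigned table_size,
                               unsigned base_loc, unsigned slots,
                               const void *sentinel)
{
   if (base_loc + slots > table_size)
      return false;

   for (unsigned i = 0; i < slots; i++) {
      if (table[base_loc + i] == sentinel)
         return false;                 /* overlap: already reserved */
      table[base_loc + i] = sentinel;
   }

   return true;
}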
3516 
3517 static bool
3518 reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
3519                                       struct gl_program *p,
3520                                       ir_variable *var)
3521 {
3522    unsigned slots = var->type->uniform_locations();
3523    unsigned max_loc = var->data.location + slots - 1;
3524 
3525    /* Resize remap table if locations do not fit in the current one. */
3526    if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
3527       p->sh.SubroutineUniformRemapTable =
3528          reralloc(p, p->sh.SubroutineUniformRemapTable,
3529                   gl_uniform_storage *,
3530                   max_loc + 1);
3531 
3532       if (!p->sh.SubroutineUniformRemapTable) {
3533          linker_error(prog, "Out of memory during linking.\n");
3534          return false;
3535       }
3536 
3537       /* Initialize allocated space. */
3538       for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
3539          p->sh.SubroutineUniformRemapTable[i] = NULL;
3540 
3541       p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
3542    }
3543 
3544    for (unsigned i = 0; i < slots; i++) {
3545       unsigned loc = var->data.location + i;
3546 
3547       /* Check if location is already used. */
3548       if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3549 
3550          /* ARB_explicit_uniform_location specification states:
3551           *     "No two subroutine uniform variables can have the same location
3552           *     in the same shader stage, otherwise a compiler or linker error
3553           *     will be generated."
3554           */
3555          linker_error(prog,
3556                       "location qualifier for uniform %s overlaps "
3557                       "previously used location\n",
3558                       var->name);
3559          return false;
3560       }
3561 
3562       /* Initialize location as inactive before optimization
3563        * rounds and location assignment.
3564        */
3565       p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3566    }
3567 
3568    return true;
3569 }
3570 /**
3571  * Check and reserve all explicit uniform locations.  This is called before
3572  * any optimizations happen so that inactive uniforms and inactive array
3573  * elements that may later be trimmed away are handled as well.
3574  */
3575 static void
3576 check_explicit_uniform_locations(struct gl_context *ctx,
3577                                  struct gl_shader_program *prog)
3578 {
3579    prog->NumExplicitUniformLocations = 0;
3580 
3581    if (!ctx->Extensions.ARB_explicit_uniform_location)
3582       return;
3583 
3584    /* This map is used to detect if overlapping explicit locations
3585     * occur with the same uniform (from a different stage) or a different one.
3586     */
3587    string_to_uint_map *uniform_map = new string_to_uint_map;
3588 
3589    if (!uniform_map) {
3590       linker_error(prog, "Out of memory during linking.\n");
3591       return;
3592    }
3593 
3594    unsigned entries_total = 0;
3595    unsigned mask = prog->data->linked_stages;
3596    while (mask) {
3597       const int i = u_bit_scan(&mask);
3598       struct gl_program *p = prog->_LinkedShaders[i]->Program;
3599 
3600       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
3601          ir_variable *var = node->as_variable();
3602          if (!var || var->data.mode != ir_var_uniform)
3603             continue;
3604 
3605          if (var->data.explicit_location) {
3606             bool ret = false;
3607             if (var->type->without_array()->is_subroutine())
3608                ret = reserve_subroutine_explicit_locations(prog, p, var);
3609             else {
3610                int slots = reserve_explicit_locations(prog, uniform_map,
3611                                                       var);
3612                if (slots != -1) {
3613                   ret = true;
3614                   entries_total += slots;
3615                }
3616             }
3617             if (!ret) {
3618                delete uniform_map;
3619                return;
3620             }
3621          }
3622       }
3623    }
3624 
3625    link_util_update_empty_uniform_locations(prog);
3626 
3627    delete uniform_map;
3628    prog->NumExplicitUniformLocations = entries_total;
3629 }
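
/* Illustrative sketch of the linked-stage iteration idiom used above: the
 * linked_stages bitmask is consumed with u_bit_scan(), which pops the lowest
 * set bit on each call.  Here it just counts the linked stages; the parameter
 * is hypothetical.
 */
static inline unsigned
example_count_linked_stages(unsigned linked_stages)
{
   unsigned count = 0;
   unsigned mask = linked_stages;

   while (mask) {
      (void) u_bit_scan(&mask);
      count++;
   }

   return count;
}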
3630 
3631 /* Checks whether the variable var is a packed varying and whether the
3632  * given name is part of the packed varying's list.
3633  *
3634  * If a variable is a packed varying, it has a name like
3635  * 'packed:a,b,c' where a, b and c are separate variables.
3636  */
3637 static bool
3638 included_in_packed_varying(ir_variable *var, const char *name)
3639 {
3640    if (strncmp(var->name, "packed:", 7) != 0)
3641       return false;
3642 
3643    char *list = strdup(var->name + 7);
3644    assert(list);
3645 
3646    bool found = false;
3647    char *saveptr;
3648    char *token = strtok_r(list, ",", &saveptr);
3649    while (token) {
3650       if (strcmp(token, name) == 0) {
3651          found = true;
3652          break;
3653       }
3654       token = strtok_r(NULL, ",", &saveptr);
3655    }
3656    free(list);
3657    return found;
3658 }
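
/* Usage sketch for included_in_packed_varying() with a hypothetical wrapper:
 * given a lowered variable named "packed:color,normal", the query succeeds
 * for "color" and "normal" and fails for any other name.
 */
static inline bool
example_packed_varying_carries_color(ir_variable *lowered_var)
{
   /* true when lowered_var->name is e.g. "packed:color,normal",
    * false for "packed:position,texcoord" or for non-packed variables.
    */
   return included_in_packed_varying(lowered_var, "color");
}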
3659 
3660 /**
3661  * Builds a stage reference bitmask from a variable name.
3662  */
3663 static uint8_t
3664 build_stageref(struct gl_shader_program *shProg, const char *name,
3665                unsigned mode)
3666 {
3667    uint8_t stages = 0;
3668 
3669    /* Note that we assume at most 8 stages; if more stages are added, the type
3670     * used for the reference mask in gl_program_resource will need to be changed.
3671     */
3672    assert(MESA_SHADER_STAGES < 8);
3673 
3674    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3675       struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
3676       if (!sh)
3677          continue;
3678 
3679       /* Shader symbol table may contain variables that have
3680        * been optimized away. Search IR for the variable instead.
3681        */
3682       foreach_in_list(ir_instruction, node, sh->ir) {
3683          ir_variable *var = node->as_variable();
3684          if (var) {
3685             unsigned baselen = strlen(var->name);
3686 
3687             if (included_in_packed_varying(var, name)) {
3688                   stages |= (1 << i);
3689                   break;
3690             }
3691 
3692             /* The variable mode needs to match, otherwise we might
3693              * pick a variable with the same name but a different interface.
3694              */
3695             if (var->data.mode != mode)
3696                continue;
3697 
3698             if (strncmp(var->name, name, baselen) == 0) {
3699                /* Check for exact name matches but also check for arrays and
3700                 * structs.
3701                 */
3702                if (name[baselen] == '\0' ||
3703                    name[baselen] == '[' ||
3704                    name[baselen] == '.') {
3705                   stages |= (1 << i);
3706                   break;
3707                }
3708             }
3709          }
3710       }
3711    }
3712    return stages;
3713 }
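
/* Illustrative sketch of how the returned mask is consumed: bit i is set when
 * stage i references the resource, so checking a particular stage is a single
 * bit test.  The helper name is hypothetical.
 */
static inline bool
example_stageref_includes_fragment(uint8_t stages)
{
   return (stages & (1 << MESA_SHADER_FRAGMENT)) != 0;
}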
3714 
3715 /**
3716  * Create gl_shader_variable from ir_variable class.
3717  */
3718 static gl_shader_variable *
3719 create_shader_variable(struct gl_shader_program *shProg,
3720                        const ir_variable *in,
3721                        const char *name, const glsl_type *type,
3722                        const glsl_type *interface_type,
3723                        bool use_implicit_location, int location,
3724                        const glsl_type *outermost_struct_type)
3725 {
3726    /* Allocate zero-initialized memory to ensure that bitfield padding
3727     * is zero.
3728     */
3729    gl_shader_variable *out = rzalloc(shProg, struct gl_shader_variable);
3730    if (!out)
3731       return NULL;
3732 
3733    /* gl_VertexID may be lowered to gl_VertexIDMESA, but applications
3734     * expect to see gl_VertexID in the program resource list, so pretend.
3735     */
3736    if (in->data.mode == ir_var_system_value &&
3737        in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
3738       out->name = ralloc_strdup(shProg, "gl_VertexID");
3739    } else if ((in->data.mode == ir_var_shader_out &&
3740                in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
3741               (in->data.mode == ir_var_system_value &&
3742                in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
3743       out->name = ralloc_strdup(shProg, "gl_TessLevelOuter");
3744       type = glsl_type::get_array_instance(glsl_type::float_type, 4);
3745    } else if ((in->data.mode == ir_var_shader_out &&
3746                in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
3747               (in->data.mode == ir_var_system_value &&
3748                in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
3749       out->name = ralloc_strdup(shProg, "gl_TessLevelInner");
3750       type = glsl_type::get_array_instance(glsl_type::float_type, 2);
3751    } else {
3752       out->name = ralloc_strdup(shProg, name);
3753    }
3754 
3755    if (!out->name)
3756       return NULL;
3757 
3758    /* The ARB_program_interface_query spec says:
3759     *
3760     *     "Not all active variables are assigned valid locations; the
3761     *     following variables will have an effective location of -1:
3762     *
3763     *      * uniforms declared as atomic counters;
3764     *
3765     *      * members of a uniform block;
3766     *
3767     *      * built-in inputs, outputs, and uniforms (starting with "gl_"); and
3768     *
3769     *      * inputs or outputs not declared with a "location" layout
3770     *        qualifier, except for vertex shader inputs and fragment shader
3771     *        outputs."
3772     */
3773    if (in->type->is_atomic_uint() || is_gl_identifier(in->name) ||
3774        !(in->data.explicit_location || use_implicit_location)) {
3775       out->location = -1;
3776    } else {
3777       out->location = location;
3778    }
3779 
3780    out->type = type;
3781    out->outermost_struct_type = outermost_struct_type;
3782    out->interface_type = interface_type;
3783    out->component = in->data.location_frac;
3784    out->index = in->data.index;
3785    out->patch = in->data.patch;
3786    out->mode = in->data.mode;
3787    out->interpolation = in->data.interpolation;
3788    out->explicit_location = in->data.explicit_location;
3789    out->precision = in->data.precision;
3790 
3791    return out;
3792 }
3793 
3794 static bool
3795 add_shader_variable(const struct gl_context *ctx,
3796                     struct gl_shader_program *shProg,
3797                     struct set *resource_set,
3798                     unsigned stage_mask,
3799                     GLenum programInterface, ir_variable *var,
3800                     const char *name, const glsl_type *type,
3801                     bool use_implicit_location, int location,
3802                     bool inouts_share_location,
3803                     const glsl_type *outermost_struct_type = NULL)
3804 {
3805    const glsl_type *interface_type = var->get_interface_type();
3806 
3807    if (outermost_struct_type == NULL) {
3808       if (var->data.from_named_ifc_block) {
3809          const char *interface_name = interface_type->name;
3810 
3811          if (interface_type->is_array()) {
3812             /* Issue #16 of the ARB_program_interface_query spec says:
3813              *
3814              * "* If a variable is a member of an interface block without an
3815              *    instance name, it is enumerated using just the variable name.
3816              *
3817              *  * If a variable is a member of an interface block with an
3818              *    instance name, it is enumerated as "BlockName.Member", where
3819              *    "BlockName" is the name of the interface block (not the
3820              *    instance name) and "Member" is the name of the variable."
3821              *
3822              * In particular, it indicates that it should be "BlockName",
3823              * not "BlockName[array length]".  The conformance suite and
3824              * dEQP both require this behavior.
3825              *
3826              * Here, we unwrap the extra array level added by named interface
3827              * block array lowering so we have the correct variable type.  We
3828              * also unwrap the interface type when constructing the name.
3829              *
3830              * We leave interface_type the same so that ES 3.x SSO pipeline
3831              * validation can enforce the rules requiring array length to
3832              * match on interface blocks.
3833              */
3834             type = type->fields.array;
3835 
3836             interface_name = interface_type->fields.array->name;
3837          }
3838 
3839          name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
3840       }
3841    }
3842 
3843    switch (type->base_type) {
3844    case GLSL_TYPE_STRUCT: {
3845       /* The ARB_program_interface_query spec says:
3846        *
3847        *     "For an active variable declared as a structure, a separate entry
3848        *     will be generated for each active structure member.  The name of
3849        *     each entry is formed by concatenating the name of the structure,
3850        *     the "."  character, and the name of the structure member.  If a
3851        *     structure member to enumerate is itself a structure or array,
3852        *     these enumeration rules are applied recursively."
3853        */
3854       if (outermost_struct_type == NULL)
3855          outermost_struct_type = type;
3856 
3857       unsigned field_location = location;
3858       for (unsigned i = 0; i < type->length; i++) {
3859          const struct glsl_struct_field *field = &type->fields.structure[i];
3860          char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
3861          if (!add_shader_variable(ctx, shProg, resource_set,
3862                                   stage_mask, programInterface,
3863                                   var, field_name, field->type,
3864                                   use_implicit_location, field_location,
3865                                   false, outermost_struct_type))
3866             return false;
3867 
3868          field_location += field->type->count_attribute_slots(false);
3869       }
3870       return true;
3871    }
3872 
3873    case GLSL_TYPE_ARRAY: {
3874       /* The ARB_program_interface_query spec says:
3875        *
3876        *     "For an active variable declared as an array of basic types, a
3877        *      single entry will be generated, with its name string formed by
3878        *      concatenating the name of the array and the string "[0]"."
3879        *
3880        *     "For an active variable declared as an array of an aggregate data
3881        *      type (structures or arrays), a separate entry will be generated
3882        *      for each active array element, unless noted immediately below.
3883        *      The name of each entry is formed by concatenating the name of
3884        *      the array, the "[" character, an integer identifying the element
3885        *      number, and the "]" character.  These enumeration rules are
3886        *      applied recursively, treating each enumerated array element as a
3887        *      separate active variable."
3888        */
3889       const struct glsl_type *array_type = type->fields.array;
3890       if (array_type->base_type == GLSL_TYPE_STRUCT ||
3891           array_type->base_type == GLSL_TYPE_ARRAY) {
3892          unsigned elem_location = location;
3893          unsigned stride = inouts_share_location ? 0 :
3894                            array_type->count_attribute_slots(false);
3895          for (unsigned i = 0; i < type->length; i++) {
3896             char *elem = ralloc_asprintf(shProg, "%s[%d]", name, i);
3897             if (!add_shader_variable(ctx, shProg, resource_set,
3898                                      stage_mask, programInterface,
3899                                      var, elem, array_type,
3900                                      use_implicit_location, elem_location,
3901                                      false, outermost_struct_type))
3902                return false;
3903             elem_location += stride;
3904          }
3905          return true;
3906       }
3907       FALLTHROUGH;
3908    }
3909 
3910    default: {
3911       /* The ARB_program_interface_query spec says:
3912        *
3913        *     "For an active variable declared as a single instance of a basic
3914        *     type, a single entry will be generated, using the variable name
3915        *     from the shader source."
3916        */
3917       gl_shader_variable *sha_v =
3918          create_shader_variable(shProg, var, name, type, interface_type,
3919                                 use_implicit_location, location,
3920                                 outermost_struct_type);
3921       if (!sha_v)
3922          return false;
3923 
3924       return link_util_add_program_resource(shProg, resource_set,
3925                                             programInterface, sha_v, stage_mask);
3926    }
3927    }
3928 }
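
/* Illustrative sketch of the enumeration rules quoted above, using the same
 * ralloc_asprintf() idiom as the code: a member of an aggregate array element
 * is listed as "array[i].member", while an array of basic types is listed a
 * single time as "array[0]".  The helper is hypothetical and only shows the
 * name construction.
 */
static inline char *
example_aggregate_member_resource_name(void *mem_ctx, const char *array_name,
                                        unsigned element,
                                        const char *member_name)
{
   return ralloc_asprintf(mem_ctx, "%s[%u].%s", array_name, element,
                          member_name);
}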
3929 
3930 static bool
3931 inout_has_same_location(const ir_variable *var, unsigned stage)
3932 {
3933    if (!var->data.patch &&
3934        ((var->data.mode == ir_var_shader_out &&
3935          stage == MESA_SHADER_TESS_CTRL) ||
3936         (var->data.mode == ir_var_shader_in &&
3937          (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
3938           stage == MESA_SHADER_GEOMETRY))))
3939       return true;
3940    else
3941       return false;
3942 }
3943 
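/**
 * Add the active inputs (or outputs) of the given linked stage to the
 * program resource list for GL_PROGRAM_INPUT (or GL_PROGRAM_OUTPUT).
 * Hidden variables, lowered packed varyings, and gl_out_FragData arrays are
 * skipped; the latter two are added by add_packed_varyings() and
 * add_fragdata_arrays() instead.
 */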
3944 static bool
3945 add_interface_variables(const struct gl_context *ctx,
3946                         struct gl_shader_program *shProg,
3947                         struct set *resource_set,
3948                         unsigned stage, GLenum programInterface)
3949 {
3950    exec_list *ir = shProg->_LinkedShaders[stage]->ir;
3951 
3952    foreach_in_list(ir_instruction, node, ir) {
3953       ir_variable *var = node->as_variable();
3954 
3955       if (!var || var->data.how_declared == ir_var_hidden)
3956          continue;
3957 
3958       int loc_bias;
3959 
3960       switch (var->data.mode) {
3961       case ir_var_system_value:
3962       case ir_var_shader_in:
3963          if (programInterface != GL_PROGRAM_INPUT)
3964             continue;
3965          loc_bias = (stage == MESA_SHADER_VERTEX) ? int(VERT_ATTRIB_GENERIC0)
3966                                                   : int(VARYING_SLOT_VAR0);
3967          break;
3968       case ir_var_shader_out:
3969          if (programInterface != GL_PROGRAM_OUTPUT)
3970             continue;
3971          loc_bias = (stage == MESA_SHADER_FRAGMENT) ? int(FRAG_RESULT_DATA0)
3972                                                     : int(VARYING_SLOT_VAR0);
3973          break;
3974       default:
3975          continue;
3976       }
3977 
3978       if (var->data.patch)
3979          loc_bias = int(VARYING_SLOT_PATCH0);
3980 
3981       /* Skip packed varyings; they are handled separately by
3982        * add_packed_varyings().
3983        */
3984       if (strncmp(var->name, "packed:", 7) == 0)
3985          continue;
3986 
3987       /* Skip fragdata arrays; they are handled separately by
3988        * add_fragdata_arrays().
3989        */
3990       if (strncmp(var->name, "gl_out_FragData", 15) == 0)
3991          continue;
3992 
3993       const bool vs_input_or_fs_output =
3994          (stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
3995          (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
3996 
3997       if (!add_shader_variable(ctx, shProg, resource_set,
3998                                1 << stage, programInterface,
3999                                var, var->name, var->type, vs_input_or_fs_output,
4000                                var->data.location - loc_bias,
4001                                inout_has_same_location(var, stage)))
4002          return false;
4003    }
4004    return true;
4005 }
4006 
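/**
 * Add the packed varyings recorded for a stage (sh->packed_varyings) to the
 * resource list as program inputs or outputs.  This is only done for
 * separate shader objects, whose program interface must expose the packed
 * varyings in place of the originals.
 */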
4007 static bool
4008 add_packed_varyings(const struct gl_context *ctx,
4009                     struct gl_shader_program *shProg,
4010                     struct set *resource_set,
4011                     int stage, GLenum type)
4012 {
4013    struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];
4014    GLenum iface;
4015 
4016    if (!sh || !sh->packed_varyings)
4017       return true;
4018 
4019    foreach_in_list(ir_instruction, node, sh->packed_varyings) {
4020       ir_variable *var = node->as_variable();
4021       if (var) {
4022          switch (var->data.mode) {
4023          case ir_var_shader_in:
4024             iface = GL_PROGRAM_INPUT;
4025             break;
4026          case ir_var_shader_out:
4027             iface = GL_PROGRAM_OUTPUT;
4028             break;
4029          default:
4030             unreachable("unexpected type");
4031          }
4032 
4033          if (type == iface) {
4034             const int stage_mask =
4035                build_stageref(shProg, var->name, var->data.mode);
4036             if (!add_shader_variable(ctx, shProg, resource_set,
4037                                      stage_mask,
4038                                      iface, var, var->name, var->type, false,
4039                                      var->data.location - VARYING_SLOT_VAR0,
4040                                      inout_has_same_location(var, stage)))
4041                return false;
4042          }
4043       }
4044    }
4045    return true;
4046 }
4047 
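/**
 * Add the fragment shader's gl_FragData arrays (recorded in
 * sh->fragdata_arrays) to the resource list as GL_PROGRAM_OUTPUT resources.
 */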
4048 static bool
4049 add_fragdata_arrays(const struct gl_context *ctx,
4050                     struct gl_shader_program *shProg,
4051                     struct set *resource_set)
4052 {
4053    struct gl_linked_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
4054 
4055    if (!sh || !sh->fragdata_arrays)
4056       return true;
4057 
4058    foreach_in_list(ir_instruction, node, sh->fragdata_arrays) {
4059       ir_variable *var = node->as_variable();
4060       if (var) {
4061          assert(var->data.mode == ir_var_shader_out);
4062 
4063          if (!add_shader_variable(ctx, shProg, resource_set,
4064                                   1 << MESA_SHADER_FRAGMENT,
4065                                   GL_PROGRAM_OUTPUT, var, var->name, var->type,
4066                                   true, var->data.location - FRAG_RESULT_DATA0,
4067                                   false))
4068             return false;
4069       }
4070    }
4071    return true;
4072 }
4073 
4074 /**
4075  * Builds up a list of program resources that point to existing
4076  * resource data.
4077  */
4078 void
4079 build_program_resource_list(struct gl_context *ctx,
4080                             struct gl_shader_program *shProg,
4081                             bool add_packed_varyings_only)
4082 {
4083    /* Rebuild resource list. */
4084    if (shProg->data->ProgramResourceList) {
4085       ralloc_free(shProg->data->ProgramResourceList);
4086       shProg->data->ProgramResourceList = NULL;
4087       shProg->data->NumProgramResourceList = 0;
4088    }
4089 
4090    int input_stage = MESA_SHADER_STAGES, output_stage = 0;
4091 
4092    /* Determine first input and final output stage. These are used to
4093     * detect which variables should be enumerated in the resource list
4094     * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
4095     */
4096    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4097       if (!shProg->_LinkedShaders[i])
4098          continue;
4099       if (input_stage == MESA_SHADER_STAGES)
4100          input_stage = i;
4101       output_stage = i;
4102    }
4103 
4104    /* Empty shader, no resources. */
4105    if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
4106       return;
4107 
4108    struct set *resource_set = _mesa_pointer_set_create(NULL);
4109 
4110    /* Program interface needs to expose varyings in case of SSO. */
4111    if (shProg->SeparateShader) {
4112       if (!add_packed_varyings(ctx, shProg, resource_set,
4113                                input_stage, GL_PROGRAM_INPUT))
4114          return;
4115 
4116       if (!add_packed_varyings(ctx, shProg, resource_set,
4117                                output_stage, GL_PROGRAM_OUTPUT))
4118          return;
4119    }
4120 
4121    if (add_packed_varyings_only) {
4122       _mesa_set_destroy(resource_set, NULL);
4123       return;
4124    }
4125 
4126    if (!add_fragdata_arrays(ctx, shProg, resource_set))
4127       return;
4128 
4129    /* Add inputs and outputs to the resource list. */
4130    if (!add_interface_variables(ctx, shProg, resource_set,
4131                                 input_stage, GL_PROGRAM_INPUT))
4132       return;
4133 
4134    if (!add_interface_variables(ctx, shProg, resource_set,
4135                                 output_stage, GL_PROGRAM_OUTPUT))
4136       return;
4137 
4138    if (shProg->last_vert_prog) {
4139       struct gl_transform_feedback_info *linked_xfb =
4140          shProg->last_vert_prog->sh.LinkedTransformFeedback;
4141 
4142       /* Add transform feedback varyings. */
4143       if (linked_xfb->NumVarying > 0) {
4144          for (int i = 0; i < linked_xfb->NumVarying; i++) {
4145             if (!link_util_add_program_resource(shProg, resource_set,
4146                                                 GL_TRANSFORM_FEEDBACK_VARYING,
4147                                                 &linked_xfb->Varyings[i], 0))
4148                return;
4149          }
4150       }
4151 
4152       /* Add transform feedback buffers. */
4153       for (unsigned i = 0; i < ctx->Const.MaxTransformFeedbackBuffers; i++) {
4154          if ((linked_xfb->ActiveBuffers >> i) & 1) {
4155             linked_xfb->Buffers[i].Binding = i;
4156             if (!link_util_add_program_resource(shProg, resource_set,
4157                                                 GL_TRANSFORM_FEEDBACK_BUFFER,
4158                                                 &linked_xfb->Buffers[i], 0))
4159                return;
4160          }
4161       }
4162    }
4163 
4164    int top_level_array_base_offset = -1;
4165    int top_level_array_size_in_bytes = -1;
4166    int second_element_offset = -1;
4167    int buffer_block_index = -1;
4168 
4169    /* Add uniforms from uniform storage. */
4170    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4171       /* Do not add uniforms internally used by Mesa. */
4172       if (shProg->data->UniformStorage[i].hidden)
4173          continue;
4174 
4175       bool is_shader_storage =
4176         shProg->data->UniformStorage[i].is_shader_storage;
4177       GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
4178       if (!link_util_should_add_buffer_variable(shProg,
4179                                                 &shProg->data->UniformStorage[i],
4180                                                 top_level_array_base_offset,
4181                                                 top_level_array_size_in_bytes,
4182                                                 second_element_offset,
4183                                                 buffer_block_index))
4184          continue;
4185 
4186       if (is_shader_storage) {
4187          /* From the OpenGL 4.6 specification, 7.3.1.1 Naming Active Resources:
4188           *
4189           *    "For an active shader storage block member declared as an array
4190           *    of an aggregate type, an entry will be generated only for the
4191           *    first array element, regardless of its type. Such block members
4192           *    are referred to as top-level arrays. If the block member is an
4193           *    aggregate type, the enumeration rules are then applied
4194           *    recursively."
4195           *
4196           * Below we update our tracking values used by
4197           * link_util_should_add_buffer_variable(). We only want to reset the
4198           * offsets once we have moved past the first element.
4199           */
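         /* Illustrative example (not part of the spec quote above): given
          *
          *    struct S { float f; };
          *    buffer Block { S s[4]; };
          *
          * only "s[0].f" is enumerated.  "s" is a top-level array, so an
          * entry is generated only for its first element and the rules then
          * recurse into that element's members.
          */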
4200          if (shProg->data->UniformStorage[i].offset >= second_element_offset) {
4201             top_level_array_base_offset =
4202                shProg->data->UniformStorage[i].offset;
4203 
4204             top_level_array_size_in_bytes =
4205                shProg->data->UniformStorage[i].top_level_array_size *
4206                shProg->data->UniformStorage[i].top_level_array_stride;
4207 
4208             /* Set or reset the second element offset. For non-arrays this
4209              * will be set to -1.
4210              */
4211             second_element_offset = top_level_array_size_in_bytes ?
4212                top_level_array_base_offset +
4213                shProg->data->UniformStorage[i].top_level_array_stride : -1;
4214          }
4215 
4216          buffer_block_index = shProg->data->UniformStorage[i].block_index;
4217       }
4218 
4219       uint8_t stageref = shProg->data->UniformStorage[i].active_shader_mask;
4220       if (!link_util_add_program_resource(shProg, resource_set, type,
4221                                           &shProg->data->UniformStorage[i], stageref))
4222          return;
4223    }
4224 
4225    /* Add program uniform blocks. */
4226    for (unsigned i = 0; i < shProg->data->NumUniformBlocks; i++) {
4227       if (!link_util_add_program_resource(shProg, resource_set, GL_UNIFORM_BLOCK,
4228                                           &shProg->data->UniformBlocks[i], 0))
4229          return;
4230    }
4231 
4232    /* Add program shader storage blocks. */
4233    for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
4234       if (!link_util_add_program_resource(shProg, resource_set, GL_SHADER_STORAGE_BLOCK,
4235                                           &shProg->data->ShaderStorageBlocks[i], 0))
4236          return;
4237    }
4238 
4239    /* Add atomic counter buffers. */
4240    for (unsigned i = 0; i < shProg->data->NumAtomicBuffers; i++) {
4241       if (!link_util_add_program_resource(shProg, resource_set, GL_ATOMIC_COUNTER_BUFFER,
4242                                           &shProg->data->AtomicBuffers[i], 0))
4243          return;
4244    }
4245 
4246    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4247       GLenum type;
4248       if (!shProg->data->UniformStorage[i].hidden)
4249          continue;
4250 
4251       for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
4252          if (!shProg->data->UniformStorage[i].opaque[j].active ||
4253              !shProg->data->UniformStorage[i].type->is_subroutine())
4254             continue;
4255 
4256          type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
4257          /* add shader subroutines */
4258          if (!link_util_add_program_resource(shProg, resource_set,
4259                                              type, &shProg->data->UniformStorage[i], 0))
4260             return;
4261       }
4262    }
4263 
4264    unsigned mask = shProg->data->linked_stages;
4265    while (mask) {
4266       const int i = u_bit_scan(&mask);
4267       struct gl_program *p = shProg->_LinkedShaders[i]->Program;
4268 
4269       GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
4270       for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4271          if (!link_util_add_program_resource(shProg, resource_set,
4272                                              type, &p->sh.SubroutineFunctions[j], 0))
4273             return;
4274       }
4275    }
4276 
4277    _mesa_set_destroy(resource_set, NULL);
4278 }
4279 
4280 /**
4281  * This check is done to make sure we allow only constant expression
4282  * indexing and "constant-index-expression" (indexing with an expression
4283  * that includes a loop induction variable).
4284  */
4285 static bool
4286 validate_sampler_array_indexing(struct gl_context *ctx,
4287                                 struct gl_shader_program *prog)
4288 {
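   /* Illustrative GLSL (hypothetical names): in a pre-1.30 shader,
    *
    *    for (int i = 0; i < 4; i++)
    *       color += texture2D(samplers[i], uv);
    *
    * uses a constant-index-expression and is accepted, whereas indexing the
    * sampler array with an arbitrary dynamic value (e.g. a uniform int) is
    * flagged by the visitor below.
    */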
4289    dynamic_sampler_array_indexing_visitor v;
4290    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4291       if (prog->_LinkedShaders[i] == NULL)
4292          continue;
4293 
4294       bool no_dynamic_indexing =
4295          ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectSampler;
4296 
4297       /* Search for array derefs in shader. */
4298       v.run(prog->_LinkedShaders[i]->ir);
4299       if (v.uses_dynamic_sampler_array_indexing()) {
4300          const char *msg = "sampler arrays indexed with non-constant "
4301                            "expressions are forbidden in GLSL %s %u";
4302          /* Backend has indicated that it has no dynamic indexing support. */
4303          if (no_dynamic_indexing) {
4304             linker_error(prog, msg, prog->IsES ? "ES" : "",
4305                          prog->data->Version);
4306             return false;
4307          } else {
4308             linker_warning(prog, msg, prog->IsES ? "ES" : "",
4309                            prog->data->Version);
4310          }
4311       }
4312    }
4313    return true;
4314 }
4315 
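/**
 * Build the per-stage subroutine function tables: count subroutine uniform
 * types, record each subroutine function's name, explicit index, and
 * compatible subroutine types, and reject duplicate explicit indices or
 * more than MAX_SUBROUTINES functions.
 */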
4316 static void
4317 link_assign_subroutine_types(struct gl_shader_program *prog)
4318 {
4319    unsigned mask = prog->data->linked_stages;
4320    while (mask) {
4321       const int i = u_bit_scan(&mask);
4322       gl_program *p = prog->_LinkedShaders[i]->Program;
4323 
4324       p->sh.MaxSubroutineFunctionIndex = 0;
4325       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
4326          ir_function *fn = node->as_function();
4327          if (!fn)
4328             continue;
4329 
4330          if (fn->is_subroutine)
4331             p->sh.NumSubroutineUniformTypes++;
4332 
4333          if (!fn->num_subroutine_types)
4334             continue;
4335 
4336          /* these should have been calculated earlier. */
4337          assert(fn->subroutine_index != -1);
4338          if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
4339             linker_error(prog, "Too many subroutine functions declared.\n");
4340             return;
4341          }
4342          p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
4343                                             struct gl_subroutine_function,
4344                                             p->sh.NumSubroutineFunctions + 1);
4345          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name = ralloc_strdup(p, fn->name);
4346          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
4347          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
4348             ralloc_array(p, const struct glsl_type *,
4349                          fn->num_subroutine_types);
4350 
4351          /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
4352           * GLSL 4.5 spec:
4353           *
4354           *    "Each subroutine with an index qualifier in the shader must be
4355           *    given a unique index, otherwise a compile or link error will be
4356           *    generated."
4357           */
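         /* Illustrative GLSL (hypothetical names):
          *
          *    layout(index = 2) subroutine(blend_t) vec4 add(void)  { ... }
          *    layout(index = 2) subroutine(blend_t) vec4 mult(void) { ... }
          *
          * must be rejected, because both functions use index 2.
          */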
4358          for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4359             if (p->sh.SubroutineFunctions[j].index != -1 &&
4360                 p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
4361                linker_error(prog, "each subroutine index qualifier in the "
4362                             "shader must be unique\n");
4363                return;
4364             }
4365          }
4366          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
4367             fn->subroutine_index;
4368 
4369          if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
4370             p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
4371 
4372          for (int j = 0; j < fn->num_subroutine_types; j++)
4373             p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
4374          p->sh.NumSubroutineFunctions++;
4375       }
4376    }
4377 }
4378 
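/**
 * Verify that no linked stage contains two or more function definitions
 * whose name is associated with a subroutine type, as required by section
 * 6.1.2 (Subroutines) of the GLSL 4.00 spec quoted in the body below.
 */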
4379 static void
4380 verify_subroutine_associated_funcs(struct gl_shader_program *prog)
4381 {
4382    unsigned mask = prog->data->linked_stages;
4383    while (mask) {
4384       const int i = u_bit_scan(&mask);
4385       gl_program *p = prog->_LinkedShaders[i]->Program;
4386       glsl_symbol_table *symbols = prog->_LinkedShaders[i]->symbols;
4387 
4388       /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
4389        *
4390        *   "A program will fail to compile or link if any shader
4391        *    or stage contains two or more functions with the same
4392        *    name if the name is associated with a subroutine type."
4393        */
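      /* E.g. (illustrative): if "blend" is a subroutine function, a second
       * (overloaded) definition of blend() in the same stage must cause a
       * link error, even though ordinary overloading rules would allow it.
       */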
4394       for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4395          unsigned definitions = 0;
4396          char *name = p->sh.SubroutineFunctions[j].name;
4397          ir_function *fn = symbols->get_function(name);
4398 
4399          /* Calculate number of function definitions with the same name */
4400          foreach_in_list(ir_function_signature, sig, &fn->signatures) {
4401             if (sig->is_defined) {
4402                if (++definitions > 1) {
4403                   linker_error(prog, "%s shader contains two or more function "
4404                                "definitions with name `%s', which is "
4405                                "associated with a subroutine type.\n",
4406                                _mesa_shader_stage_to_string(i),
4407                                fn->name);
4408                   return;
4409                }
4410             }
4411          }
4412       }
4413    }
4414 }
4415 
4416 
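/**
 * Mark every explicitly declared variable of the given mode (shader input
 * or output) in the IR list as always-active, so later dead-IO elimination
 * will not remove it.  Implicitly declared built-ins are left untouched.
 */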
4417 static void
4418 set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
4419 {
4420    assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
4421 
4422    foreach_in_list(ir_instruction, node, ir) {
4423       ir_variable *const var = node->as_variable();
4424 
4425       if (var == NULL || var->data.mode != io_mode)
4426          continue;
4427 
4428       /* Don't set always active on builtins that haven't been redeclared */
4429       if (var->data.how_declared == ir_var_declared_implicitly)
4430          continue;
4431 
4432       var->data.always_active_io = true;
4433    }
4434 }
4435 
4436 /**
4437  * When separate shader programs are enabled, only inputs/outputs between
4438  * the stages of a multi-stage separate program can be safely removed
4439  * from the shader interface. Other inputs/outputs must remain active.
4440  */
4441 static void
4442 disable_varying_optimizations_for_sso(struct gl_shader_program *prog)
4443 {
4444    unsigned first, last;
4445    assert(prog->SeparateShader);
4446 
4447    first = MESA_SHADER_STAGES;
4448    last = 0;
4449 
4450    /* Determine the first and last stages, excluding the compute stage. */
4451    for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
4452       if (!prog->_LinkedShaders[i])
4453          continue;
4454       if (first == MESA_SHADER_STAGES)
4455          first = i;
4456       last = i;
4457    }
4458 
4459    if (first == MESA_SHADER_STAGES)
4460       return;
4461 
4462    for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4463       gl_linked_shader *sh = prog->_LinkedShaders[stage];
4464       if (!sh)
4465          continue;
4466 
4467       /* Prevent the removal of inputs to the first and outputs from the last
4468        * stage, unless they are the initial pipeline inputs or final pipeline
4469        * outputs, respectively.
4470        *
4471        * The removal of IO between shaders in the same program is always
4472        * allowed.
4473        */
4474       if (stage == first && stage != MESA_SHADER_VERTEX)
4475          set_always_active_io(sh->ir, ir_var_shader_in);
4476       if (stage == last && stage != MESA_SHADER_FRAGMENT)
4477          set_always_active_io(sh->ir, ir_var_shader_out);
4478    }
4479 }
4480 
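/**
 * Assign uniform locations and check uniform, subroutine, image, and
 * atomic-counter resources against the implementation limits.  Only used by
 * the GLSL IR linker; the NIR linker has its own path.
 */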
4481 static void
4482 link_and_validate_uniforms(struct gl_context *ctx,
4483                            struct gl_shader_program *prog)
4484 {
4485    assert(!ctx->Const.UseNIRGLSLLinker);
4486 
4487    update_array_sizes(prog);
4488    link_assign_uniform_locations(prog, ctx);
4489 
4490    if (prog->data->LinkStatus == LINKING_FAILURE)
4491       return;
4492 
4493    link_util_calculate_subroutine_compat(prog);
4494    link_util_check_uniform_resources(ctx, prog);
4495    link_util_check_subroutine_resources(prog);
4496    check_image_resources(ctx, prog);
4497    link_assign_atomic_counter_resources(ctx, prog);
4498    link_check_atomic_counter_resources(ctx, prog);
4499 }
4500 
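/**
 * Perform the storage-assignment phase of inter-stage linking: match
 * explicit outputs to inputs, assign attribute and fragment output
 * locations, link varyings between stages, link uniforms, and run the
 * UBO/SSBO and vector-deref lowering passes.
 */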
4501 static bool
4502 link_varyings_and_uniforms(unsigned first, unsigned last,
4503                            struct gl_context *ctx,
4504                            struct gl_shader_program *prog, void *mem_ctx)
4505 {
4506    /* Mark all generic shader inputs and outputs as unpaired. */
4507    for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
4508       if (prog->_LinkedShaders[i] != NULL) {
4509          link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
4510       }
4511    }
4512 
4513    unsigned prev = first;
4514    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4515       if (prog->_LinkedShaders[i] == NULL)
4516          continue;
4517 
4518       match_explicit_outputs_to_inputs(prog->_LinkedShaders[prev],
4519                                        prog->_LinkedShaders[i]);
4520       prev = i;
4521    }
4522 
4523    if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
4524                                             MESA_SHADER_VERTEX, true)) {
4525       return false;
4526    }
4527 
4528    if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
4529                                             MESA_SHADER_FRAGMENT, true)) {
4530       return false;
4531    }
4532 
4533    prog->last_vert_prog = NULL;
4534    for (int i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
4535       if (prog->_LinkedShaders[i] == NULL)
4536          continue;
4537 
4538       prog->last_vert_prog = prog->_LinkedShaders[i]->Program;
4539       break;
4540    }
4541 
4542    if (!link_varyings(prog, first, last, ctx, mem_ctx))
4543       return false;
4544 
4545    if (!ctx->Const.UseNIRGLSLLinker)
4546       link_and_validate_uniforms(ctx, prog);
4547 
4548    if (!prog->data->LinkStatus)
4549       return false;
4550 
4551    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4552       if (prog->_LinkedShaders[i] == NULL)
4553          continue;
4554 
4555       const struct gl_shader_compiler_options *options =
4556          &ctx->Const.ShaderCompilerOptions[i];
4557 
4558       if (options->LowerBufferInterfaceBlocks)
4559          lower_ubo_reference(prog->_LinkedShaders[i],
4560                              options->ClampBlockIndicesToArrayBounds,
4561                              ctx->Const.UseSTD430AsDefaultPacking);
4562 
4563       if (i == MESA_SHADER_COMPUTE)
4564          lower_shared_reference(ctx, prog, prog->_LinkedShaders[i]);
4565 
4566       lower_vector_derefs(prog->_LinkedShaders[i]);
4567       do_vec_index_to_swizzle(prog->_LinkedShaders[i]->ir);
4568    }
4569 
4570    return true;
4571 }
4572 
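/**
 * Run do_common_optimization() on a linked shader's IR: once when
 * conservative optimization is requested, otherwise until it reaches a
 * fixed point.
 */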
4573 static void
4574 linker_optimisation_loop(struct gl_context *ctx, exec_list *ir,
4575                          unsigned stage)
4576 {
4577       if (ctx->Const.GLSLOptimizeConservatively) {
4578          /* Run it just once. */
4579          do_common_optimization(ir, true, false,
4580                                 &ctx->Const.ShaderCompilerOptions[stage],
4581                                 ctx->Const.NativeIntegers);
4582       } else {
4583          /* Repeat it until it stops making changes. */
4584          while (do_common_optimization(ir, true, false,
4585                                        &ctx->Const.ShaderCompilerOptions[stage],
4586                                        ctx->Const.NativeIntegers))
4587             ;
4588       }
4589 }
4590 
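/**
 * Main GLSL IR linking entry point: group the attached shaders by stage,
 * perform intrastage and then interstage linking and validation, and leave
 * the result in prog->_LinkedShaders with prog->data->LinkStatus indicating
 * success or failure.
 */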
4591 void
4592 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
4593 {
4594    prog->data->LinkStatus = LINKING_SUCCESS; /* All error paths will set this to false */
4595    prog->data->Validated = false;
4596 
4597    /* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
4598     *
4599     *     "Linking can fail for a variety of reasons as specified in the
4600     *     OpenGL Shading Language Specification, as well as any of the
4601     *     following reasons:
4602     *
4603     *     - No shader objects are attached to program."
4604     *
4605     * The Compatibility Profile specification does not list the error.  In
4606     * Compatibility Profile missing shader stages are replaced by
4607     * fixed-function.  This applies to the case where all stages are
4608     * missing.
4609     */
4610    if (prog->NumShaders == 0) {
4611       if (ctx->API != API_OPENGL_COMPAT)
4612          linker_error(prog, "no shaders attached to the program\n");
4613       return;
4614    }
4615 
4616 #ifdef ENABLE_SHADER_CACHE
4617    if (shader_cache_read_program_metadata(ctx, prog))
4618       return;
4619 #endif
4620 
4621    void *mem_ctx = ralloc_context(NULL); // temporary linker context
4622 
4623    prog->ARB_fragment_coord_conventions_enable = false;
4624 
4625    /* Separate the shaders into groups based on their type.
4626     */
4627    struct gl_shader **shader_list[MESA_SHADER_STAGES];
4628    unsigned num_shaders[MESA_SHADER_STAGES];
4629 
4630    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
4631       shader_list[i] = (struct gl_shader **)
4632          calloc(prog->NumShaders, sizeof(struct gl_shader *));
4633       num_shaders[i] = 0;
4634    }
4635 
4636    unsigned min_version = UINT_MAX;
4637    unsigned max_version = 0;
4638    for (unsigned i = 0; i < prog->NumShaders; i++) {
4639       min_version = MIN2(min_version, prog->Shaders[i]->Version);
4640       max_version = MAX2(max_version, prog->Shaders[i]->Version);
4641 
4642       if (!ctx->Const.AllowGLSLRelaxedES &&
4643           prog->Shaders[i]->IsES != prog->Shaders[0]->IsES) {
4644          linker_error(prog, "all shaders must use same shading "
4645                       "language version\n");
4646          goto done;
4647       }
4648 
4649       if (prog->Shaders[i]->ARB_fragment_coord_conventions_enable) {
4650          prog->ARB_fragment_coord_conventions_enable = true;
4651       }
4652 
4653       gl_shader_stage shader_type = prog->Shaders[i]->Stage;
4654       shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
4655       num_shaders[shader_type]++;
4656    }
4657 
4658    /* In desktop GLSL, different shader versions may be linked together.  In
4659     * GLSL ES, all shader versions must be the same.
4660     */
4661    if (!ctx->Const.AllowGLSLRelaxedES && prog->Shaders[0]->IsES &&
4662        min_version != max_version) {
4663       linker_error(prog, "all shaders must use same shading "
4664                    "language version\n");
4665       goto done;
4666    }
4667 
4668    prog->data->Version = max_version;
4669    prog->IsES = prog->Shaders[0]->IsES;
4670 
4671    /* Some shaders can only be linked when certain other shaders are present.
4672     */
4673    if (!prog->SeparateShader) {
4674       if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
4675           num_shaders[MESA_SHADER_VERTEX] == 0) {
4676          linker_error(prog, "Geometry shader must be linked with "
4677                       "vertex shader\n");
4678          goto done;
4679       }
4680       if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
4681           num_shaders[MESA_SHADER_VERTEX] == 0) {
4682          linker_error(prog, "Tessellation evaluation shader must be linked "
4683                       "with vertex shader\n");
4684          goto done;
4685       }
4686       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4687           num_shaders[MESA_SHADER_VERTEX] == 0) {
4688          linker_error(prog, "Tessellation control shader must be linked with "
4689                       "vertex shader\n");
4690          goto done;
4691       }
4692 
4693       /* Section 7.3 of the OpenGL ES 3.2 specification says:
4694        *
4695        *    "Linking can fail for [...] any of the following reasons:
4696        *
4697        *     * program contains an object to form a tessellation control
4698        *       shader [...] and [...] the program is not separable and
4699        *       contains no object to form a tessellation evaluation shader"
4700        *
4701        * The OpenGL spec is contradictory. It allows linking without a tess
4702        * eval shader, but that can only be used with transform feedback and
4703        * rasterization disabled. However, transform feedback isn't allowed
4704        * with GL_PATCHES, so it can't be used.
4705        *
4706        * More investigation showed that the idea of transform feedback after
4707        * a tess control shader was dropped, because some hw vendors couldn't
4708        * support tessellation without a tess eval shader, but the linker
4709        * section wasn't updated to reflect that.
4710        *
4711        * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
4712        * spec bug.
4713        *
4714        * Do what's reasonable and always require a tess eval shader if a tess
4715        * control shader is present.
4716        */
4717       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4718           num_shaders[MESA_SHADER_TESS_EVAL] == 0) {
4719          linker_error(prog, "Tessellation control shader must be linked with "
4720                       "tessellation evaluation shader\n");
4721          goto done;
4722       }
4723 
4724       if (prog->IsES) {
4725          if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
4726              num_shaders[MESA_SHADER_TESS_CTRL] == 0) {
4727             linker_error(prog, "GLSL ES requires non-separable programs "
4728                          "containing a tessellation evaluation shader to also "
4729                          "be linked with a tessellation control shader\n");
4730             goto done;
4731          }
4732       }
4733    }
4734 
4735    /* Compute shaders have additional restrictions. */
4736    if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
4737        num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
4738       linker_error(prog, "Compute shaders may not be linked with any other "
4739                    "type of shader\n");
4740    }
4741 
4742    /* Link all shaders for a particular stage and validate the result.
4743     */
4744    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4745       if (num_shaders[stage] > 0) {
4746          gl_linked_shader *const sh =
4747             link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
4748                                     num_shaders[stage], false);
4749 
4750          if (!prog->data->LinkStatus) {
4751             if (sh)
4752                _mesa_delete_linked_shader(ctx, sh);
4753             goto done;
4754          }
4755 
4756          switch (stage) {
4757          case MESA_SHADER_VERTEX:
4758             validate_vertex_shader_executable(prog, sh, ctx);
4759             break;
4760          case MESA_SHADER_TESS_CTRL:
4761             /* nothing to be done */
4762             break;
4763          case MESA_SHADER_TESS_EVAL:
4764             validate_tess_eval_shader_executable(prog, sh, ctx);
4765             break;
4766          case MESA_SHADER_GEOMETRY:
4767             validate_geometry_shader_executable(prog, sh, ctx);
4768             break;
4769          case MESA_SHADER_FRAGMENT:
4770             validate_fragment_shader_executable(prog, sh);
4771             break;
4772          }
4773          if (!prog->data->LinkStatus) {
4774             if (sh)
4775                _mesa_delete_linked_shader(ctx, sh);
4776             goto done;
4777          }
4778 
4779          prog->_LinkedShaders[stage] = sh;
4780          prog->data->linked_stages |= 1 << stage;
4781       }
4782    }
4783 
4784    /* Here begins the inter-stage linking phase.  Some initial validation is
4785     * performed, then locations are assigned for uniforms, attributes, and
4786     * varyings.
4787     */
4788    cross_validate_uniforms(ctx, prog);
4789    if (!prog->data->LinkStatus)
4790       goto done;
4791 
4792    unsigned first, last, prev;
4793 
4794    first = MESA_SHADER_STAGES;
4795    last = 0;
4796 
4797    /* Determine first and last stage. */
4798    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4799       if (!prog->_LinkedShaders[i])
4800          continue;
4801       if (first == MESA_SHADER_STAGES)
4802          first = i;
4803       last = i;
4804    }
4805 
4806    check_explicit_uniform_locations(ctx, prog);
4807    link_assign_subroutine_types(prog);
4808    verify_subroutine_associated_funcs(prog);
4809 
4810    if (!prog->data->LinkStatus)
4811       goto done;
4812 
4813    resize_tes_inputs(ctx, prog);
4814 
4815    /* Validate the inputs of each stage against the outputs of the preceding
4816     * stage.
4817     */
4818    prev = first;
4819    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4820       if (prog->_LinkedShaders[i] == NULL)
4821          continue;
4822 
4823       validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
4824                                        prog->_LinkedShaders[i]);
4825       if (!prog->data->LinkStatus)
4826          goto done;
4827 
4828       cross_validate_outputs_to_inputs(ctx, prog,
4829                                        prog->_LinkedShaders[prev],
4830                                        prog->_LinkedShaders[i]);
4831       if (!prog->data->LinkStatus)
4832          goto done;
4833 
4834       prev = i;
4835    }
4836 
4837    /* The cross validation of outputs/inputs above validates interstage
4838     * explicit locations. We need to do this also for the inputs in the first
4839     * stage and outputs of the last stage included in the program, since there
4840     * is no cross validation for these.
4841     */
4842    validate_first_and_last_interface_explicit_locations(ctx, prog,
4843                                                         (gl_shader_stage) first,
4844                                                         (gl_shader_stage) last);
4845 
4846    /* Cross-validate uniform blocks between shader stages */
4847    validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
4848    if (!prog->data->LinkStatus)
4849       goto done;
4850 
4851    for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
4852       if (prog->_LinkedShaders[i] != NULL)
4853          lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
4854    }
4855 
4856    if (prog->IsES && prog->data->Version == 100)
4857       if (!validate_invariant_builtins(prog,
4858             prog->_LinkedShaders[MESA_SHADER_VERTEX],
4859             prog->_LinkedShaders[MESA_SHADER_FRAGMENT]))
4860          goto done;
4861 
4862    /* Implement the GLSL 1.30+ rule for discard vs. infinite loops.  Do
4863     * it before optimization because we want most of the checks to get
4864     * dropped thanks to constant propagation.
4865     *
4866     * This rule also applies to GLSL ES 3.00.
4867     */
4868    if (max_version >= (prog->IsES ? 300 : 130)) {
4869       struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
4870       if (sh) {
4871          lower_discard_flow(sh->ir);
4872       }
4873    }
4874 
4875    if (prog->SeparateShader)
4876       disable_varying_optimizations_for_sso(prog);
4877 
4878    /* Process UBOs */
4879    if (!interstage_cross_validate_uniform_blocks(prog, false))
4880       goto done;
4881 
4882    /* Process SSBOs */
4883    if (!interstage_cross_validate_uniform_blocks(prog, true))
4884       goto done;
4885 
4886    /* Do common optimization before assigning storage for attributes,
4887     * uniforms, and varyings.  Later optimization could possibly make
4888     * some of that unused.
4889     */
4890    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4891       if (prog->_LinkedShaders[i] == NULL)
4892          continue;
4893 
4894       detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
4895       if (!prog->data->LinkStatus)
4896          goto done;
4897 
4898       if (ctx->Const.ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
4899          lower_clip_cull_distance(prog, prog->_LinkedShaders[i]);
4900       }
4901 
4902       if (ctx->Const.LowerTessLevel) {
4903          lower_tess_level(prog->_LinkedShaders[i]);
4904       }
4905 
4906       /* Section 13.46 (Vertex Attribute Aliasing) of the OpenGL ES 3.2
4907        * specification says:
4908        *
4909        *    "In general, the behavior of GLSL ES should not depend on compiler
4910        *    optimizations which might be implementation-dependent. Name matching
4911        *    rules in most languages, including C++ from which GLSL ES is derived,
4912        *    are based on declarations rather than use.
4913        *
4914        *    RESOLUTION: The existence of aliasing is determined by declarations
4915        *    present after preprocessing."
4916        *
4917        * Because of this rule, we do a 'dry-run' of attribute assignment for
4918        * vertex shader inputs here.
4919        */
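      /* Illustrative example (hypothetical declarations): in a GLSL ES
       * vertex shader,
       *
       *    layout(location = 1) in vec4 a;
       *    layout(location = 1) in vec4 b;
       *
       * must fail to link even if b is never used, because aliasing is
       * judged on the declarations alone.
       */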
4920       if (prog->IsES && i == MESA_SHADER_VERTEX) {
4921          if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
4922                                                   MESA_SHADER_VERTEX, false)) {
4923             goto done;
4924          }
4925       }
4926 
4927       /* Call opts before lowering const arrays to uniforms so we can const
4928        * propagate any elements accessed directly.
4929        */
4930       linker_optimisation_loop(ctx, prog->_LinkedShaders[i]->ir, i);
4931 
4932       /* Call opts after lowering const arrays to copy propagate things. */
4933       if (ctx->Const.GLSLLowerConstArrays &&
4934           lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir, i,
4935                                          ctx->Const.Program[i].MaxUniformComponents))
4936          linker_optimisation_loop(ctx, prog->_LinkedShaders[i]->ir, i);
4937 
4938    }
4939 
4940    /* Validation for special cases where we allow sampler array indexing
4941     * with a loop induction variable. This check emits a warning or an error
4942     * depending on whether the backend can handle dynamic indexing.
4943     */
4944    if ((!prog->IsES && prog->data->Version < 130) ||
4945        (prog->IsES && prog->data->Version < 300)) {
4946       if (!validate_sampler_array_indexing(ctx, prog))
4947          goto done;
4948    }
4949 
4950    /* Check and validate stream emissions in geometry shaders */
4951    validate_geometry_shader_emissions(ctx, prog);
4952 
4953    store_fragdepth_layout(prog);
4954 
4955    if (!link_varyings_and_uniforms(first, last, ctx, prog, mem_ctx))
4956       goto done;
4957 
4958    /* Linking varyings can cause some extra, useless swizzles to be generated
4959     * due to packing and unpacking.
4960     */
4961    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4962       if (prog->_LinkedShaders[i] == NULL)
4963          continue;
4964 
4965       optimize_swizzles(prog->_LinkedShaders[i]->ir);
4966    }
4967 
4968    /* OpenGL ES < 3.1 requires that a vertex shader and a fragment shader both
4969     * be present in a linked program. GL_ARB_ES2_compatibility doesn't say
4970     * anything about shader linking when one of the shaders (vertex or
4971     * fragment shader) is absent. So, the extension shouldn't change the
4972     * behavior specified in the GLSL specification.
4973     *
4974     * From OpenGL ES 3.1 specification (7.3 Program Objects):
4975     *     "Linking can fail for a variety of reasons as specified in the
4976     *     OpenGL ES Shading Language Specification, as well as any of the
4977     *     following reasons:
4978     *
4979     *     ...
4980     *
4981     *     * program contains objects to form either a vertex shader or
4982     *       fragment shader, and program is not separable, and does not
4983     *       contain objects to form both a vertex shader and fragment
4984     *       shader."
4985     *
4986     * However, the only scenario in 3.1+ where we don't require them both is
4987     * when we have a compute shader. For example:
4988     *
4989     * - No shaders is a link error.
4990     * - Geom or Tess without a Vertex shader is a link error, which means we
4991     *   always require a Vertex shader and hence a Fragment shader.
4992     * - Finally a Compute shader linked with any other stage is a link error.
4993     */
4994    if (!prog->SeparateShader && ctx->API == API_OPENGLES2 &&
4995        num_shaders[MESA_SHADER_COMPUTE] == 0) {
4996       if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
4997          linker_error(prog, "program lacks a vertex shader\n");
4998       } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
4999          linker_error(prog, "program lacks a fragment shader\n");
5000       }
5001    }
5002 
5003 done:
5004    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
5005       free(shader_list[i]);
5006       if (prog->_LinkedShaders[i] == NULL)
5007          continue;
5008 
5009       /* Do a final validation step to make sure that the IR wasn't
5010        * invalidated by any modifications performed after intrastage linking.
5011        */
5012       validate_ir_tree(prog->_LinkedShaders[i]->ir);
5013 
5014       /* Retain any live IR, but trash the rest. */
5015       reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
5016 
5017       /* The symbol table in the linked shaders may contain references to
5018        * variables that were removed (e.g., unused uniforms).  Since it may
5019        * contain junk, there is no possible valid use.  Delete it and set the
5020        * pointer to NULL.
5021        */
5022       delete prog->_LinkedShaders[i]->symbols;
5023       prog->_LinkedShaders[i]->symbols = NULL;
5024    }
5025 
5026    ralloc_free(mem_ctx);
5027 }
5028