1 /*
2  * Copyright © 2019 Google, Inc
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * \file lower_precision.cpp
26  */
27 
28 #include "main/macros.h"
29 #include "main/mtypes.h"
30 #include "compiler/glsl_types.h"
31 #include "ir.h"
32 #include "ir_builder.h"
33 #include "ir_optimization.h"
34 #include "ir_rvalue_visitor.h"
35 #include "util/half_float.h"
36 #include "util/set.h"
37 #include "util/hash_table.h"
38 #include <vector>
39 
40 namespace {
41 
/* Top-level visitor that performs the actual lowering. For every rvalue
 * recorded in lowerable_rvalues it rewrites the subtree to 16-bit types
 * (via lower_precision_visitor) and wraps the result in a conversion back
 * to 32 bits. It also swaps calls to lowerable builtins for lowered clones.
 */
class find_precision_visitor : public ir_rvalue_enter_visitor {
public:
   find_precision_visitor(const struct gl_shader_compiler_options *options);
   ~find_precision_visitor();

   virtual void handle_rvalue(ir_rvalue **rvalue);
   virtual ir_visitor_status visit_enter(ir_call *ir);

   /* Map a builtin signature to its lowered clone, creating and caching the
    * clone in lowered_builtins on first use.
    */
   ir_function_signature *map_builtin(ir_function_signature *sig);

   /* Set of rvalues that can be lowered. This will be filled in by
    * find_lowerable_rvalues_visitor. Only the root node of a lowerable section
    * will be added to this set.
    */
   struct set *lowerable_rvalues;

   /**
    * A mapping of builtin signature functions to lowered versions. This is
    * filled in lazily when a lowered version is needed.
    */
   struct hash_table *lowered_builtins;
   /**
    * A temporary hash table only used in order to clone functions.
    */
   struct hash_table *clone_ht;

   /* ralloc context owning the lazily created lowered builtin clones. */
   void *lowered_builtin_mem_ctx;

   const struct gl_shader_compiler_options *options;
};
72 
/* Analysis visitor: walks the instruction tree and decides, per rvalue,
 * whether it may be lowered to 16 bits. It maintains an explicit stack
 * mirroring the visitor recursion so that a child's decision can be
 * combined into its parent's. The result is a set containing only the
 * topmost lowerable rvalues.
 */
class find_lowerable_rvalues_visitor : public ir_hierarchical_visitor {
public:
   /* Tri-state lowering decision for a stack entry. */
   enum can_lower_state {
      UNKNOWN,
      CANT_LOWER,
      SHOULD_LOWER,
   };

   enum parent_relation {
      /* The parent performs a further operation involving the result from the
       * child and can be lowered along with it.
       */
      COMBINED_OPERATION,
      /* The parent instruction’s operation is independent of the child type so
       * the child should be lowered separately.
       */
      INDEPENDENT_OPERATION,
   };

   /* One entry per instruction currently being visited. */
   struct stack_entry {
      ir_instruction *instr;
      enum can_lower_state state;
      /* List of child rvalues that can be lowered. When this stack entry is
       * popped, if this node itself can’t be lowered than all of the children
       * are root nodes to lower so we will add them to lowerable_rvalues.
       * Otherwise if this node can also be lowered then we won’t add the
       * children because we only want to add the topmost lowerable nodes to
       * lowerable_rvalues and the children will be lowered as part of lowering
       * this node.
       */
      std::vector<ir_instruction *> lowerable_children;
   };

   find_lowerable_rvalues_visitor(struct set *result,
                                  const struct gl_shader_compiler_options *options);

   /* Hierarchical-visitor callbacks that push/pop stack entries. */
   static void stack_enter(class ir_instruction *ir, void *data);
   static void stack_leave(class ir_instruction *ir, void *data);

   virtual ir_visitor_status visit(ir_constant *ir);
   virtual ir_visitor_status visit(ir_dereference_variable *ir);

   virtual ir_visitor_status visit_enter(ir_dereference_record *ir);
   virtual ir_visitor_status visit_enter(ir_dereference_array *ir);
   virtual ir_visitor_status visit_enter(ir_texture *ir);
   virtual ir_visitor_status visit_enter(ir_expression *ir);

   virtual ir_visitor_status visit_leave(ir_assignment *ir);
   virtual ir_visitor_status visit_leave(ir_call *ir);

   /* Translate a GLSL_PRECISION_* qualifier on a lowerable type into a
    * can_lower_state.
    */
   can_lower_state handle_precision(const glsl_type *type,
                                    int precision) const;

   static parent_relation get_parent_relation(ir_instruction *parent,
                                              ir_instruction *child);

   std::vector<stack_entry> stack;
   struct set *lowerable_rvalues;
   const struct gl_shader_compiler_options *options;

   void pop_stack_entry();
   void add_lowerable_children(const stack_entry &entry);
};
136 
/* Rewriting visitor applied to a single lowerable rvalue tree (by
 * find_precision_visitor::handle_rvalue): converts types and constants to
 * 16 bits and stops at dereferences, calls and texture ops, which are
 * handled separately.
 */
class lower_precision_visitor : public ir_rvalue_visitor {
public:
   virtual void handle_rvalue(ir_rvalue **rvalue);
   virtual ir_visitor_status visit_enter(ir_dereference_array *);
   virtual ir_visitor_status visit_enter(ir_dereference_record *);
   virtual ir_visitor_status visit_enter(ir_call *ir);
   virtual ir_visitor_status visit_enter(ir_texture *ir);
   virtual ir_visitor_status visit_leave(ir_expression *);
};
146 
147 static bool
can_lower_type(const struct gl_shader_compiler_options * options,const glsl_type * type)148 can_lower_type(const struct gl_shader_compiler_options *options,
149                const glsl_type *type)
150 {
151    /* Don’t lower any expressions involving non-float types except bool and
152     * texture samplers. This will rule out operations that change the type such
153     * as conversion to ints. Instead it will end up lowering the arguments
154     * instead and adding a final conversion to float32. We want to handle
155     * boolean types so that it will do comparisons as 16-bit.
156     */
157 
158    switch (type->without_array()->base_type) {
159    /* TODO: should we do anything for these two with regard to Int16 vs FP16
160     * support?
161     */
162    case GLSL_TYPE_BOOL:
163    case GLSL_TYPE_SAMPLER:
164    case GLSL_TYPE_IMAGE:
165       return true;
166 
167    case GLSL_TYPE_FLOAT:
168       return options->LowerPrecisionFloat16;
169 
170    case GLSL_TYPE_UINT:
171    case GLSL_TYPE_INT:
172       return options->LowerPrecisionInt16;
173 
174    default:
175       return false;
176    }
177 }
178 
/* Stores the output set and options, and wires the base hierarchical
 * visitor's enter/leave callbacks so a stack entry is pushed/popped around
 * every visited instruction.
 */
find_lowerable_rvalues_visitor::find_lowerable_rvalues_visitor(struct set *res,
                                 const struct gl_shader_compiler_options *opts)
{
   lowerable_rvalues = res;
   options = opts;
   callback_enter = stack_enter;
   callback_leave = stack_leave;
   data_enter = this;
   data_leave = this;
}
189 
190 void
stack_enter(class ir_instruction * ir,void * data)191 find_lowerable_rvalues_visitor::stack_enter(class ir_instruction *ir,
192                                             void *data)
193 {
194    find_lowerable_rvalues_visitor *state =
195       (find_lowerable_rvalues_visitor *) data;
196 
197    /* Add a new stack entry for this instruction */
198    stack_entry entry;
199 
200    entry.instr = ir;
201    entry.state = state->in_assignee ? CANT_LOWER : UNKNOWN;
202 
203    state->stack.push_back(entry);
204 }
205 
206 void
add_lowerable_children(const stack_entry & entry)207 find_lowerable_rvalues_visitor::add_lowerable_children(const stack_entry &entry)
208 {
209    /* We can’t lower this node so if there were any pending children then they
210     * are all root lowerable nodes and we should add them to the set.
211     */
212    for (auto &it : entry.lowerable_children)
213       _mesa_set_add(lowerable_rvalues, it);
214 }
215 
/* Pop the topmost stack entry, folding its lowering decision into its
 * parent (when the parent operation combines with it) and emitting root
 * lowerable rvalues into lowerable_rvalues.
 */
void
find_lowerable_rvalues_visitor::pop_stack_entry()
{
   /* NOTE: reference into the vector; stays valid because nothing below
    * grows the stack before the final pop_back().
    */
   const stack_entry &entry = stack.back();

   if (stack.size() >= 2) {
      /* Combine this state into the parent state, unless the parent operation
       * doesn’t have any relation to the child operations
       */
      stack_entry &parent = stack.end()[-2];
      parent_relation rel = get_parent_relation(parent.instr, entry.instr);

      if (rel == COMBINED_OPERATION) {
         switch (entry.state) {
         /* A child that can’t be lowered poisons the combined parent. */
         case CANT_LOWER:
            parent.state = CANT_LOWER;
            break;
         /* A lowerable child only upgrades an undecided parent — it must
          * not override a parent already marked CANT_LOWER.
          */
         case SHOULD_LOWER:
            if (parent.state == UNKNOWN)
               parent.state = SHOULD_LOWER;
            break;
         case UNKNOWN:
            break;
         }
      }
   }

   if (entry.state == SHOULD_LOWER) {
      ir_rvalue *rv = entry.instr->as_rvalue();

      if (rv == NULL) {
         /* Not an rvalue itself (e.g. a statement): promote its lowerable
          * children to root nodes.
          */
         add_lowerable_children(entry);
      } else if (stack.size() >= 2) {
         stack_entry &parent = stack.end()[-2];

         switch (get_parent_relation(parent.instr, rv)) {
         case COMBINED_OPERATION:
            /* We only want to add the toplevel lowerable instructions to the
             * lowerable set. Therefore if there is a parent then instead of
             * adding this instruction to the set we will queue depending on
             * the result of the parent instruction.
             */
            parent.lowerable_children.push_back(entry.instr);
            break;
         case INDEPENDENT_OPERATION:
            _mesa_set_add(lowerable_rvalues, rv);
            break;
         }
      } else {
         /* This is a toplevel node so add it directly to the lowerable
          * set.
          */
         _mesa_set_add(lowerable_rvalues, rv);
      }
   } else if (entry.state == CANT_LOWER) {
      add_lowerable_children(entry);
   }

   stack.pop_back();
}
276 
277 void
stack_leave(class ir_instruction * ir,void * data)278 find_lowerable_rvalues_visitor::stack_leave(class ir_instruction *ir,
279                                             void *data)
280 {
281    find_lowerable_rvalues_visitor *state =
282       (find_lowerable_rvalues_visitor *) data;
283 
284    state->pop_stack_entry();
285 }
286 
287 enum find_lowerable_rvalues_visitor::can_lower_state
handle_precision(const glsl_type * type,int precision) const288 find_lowerable_rvalues_visitor::handle_precision(const glsl_type *type,
289                                                  int precision) const
290 {
291    if (!can_lower_type(options, type))
292       return CANT_LOWER;
293 
294    switch (precision) {
295    case GLSL_PRECISION_NONE:
296       return UNKNOWN;
297    case GLSL_PRECISION_HIGH:
298       return CANT_LOWER;
299    case GLSL_PRECISION_MEDIUM:
300    case GLSL_PRECISION_LOW:
301       return SHOULD_LOWER;
302    }
303 
304    return CANT_LOWER;
305 }
306 
307 enum find_lowerable_rvalues_visitor::parent_relation
get_parent_relation(ir_instruction * parent,ir_instruction * child)308 find_lowerable_rvalues_visitor::get_parent_relation(ir_instruction *parent,
309                                                     ir_instruction *child)
310 {
311    /* If the parent is a dereference instruction then the only child could be
312     * for example an array dereference and that should be lowered independently
313     * of the parent.
314     */
315    if (parent->as_dereference())
316       return INDEPENDENT_OPERATION;
317 
318    /* The precision of texture sampling depend on the precision of the sampler.
319     * The rest of the arguments don’t matter so we can treat it as an
320     * independent operation.
321     */
322    if (parent->as_texture())
323       return INDEPENDENT_OPERATION;
324 
325    return COMBINED_OPERATION;
326 }
327 
328 ir_visitor_status
visit(ir_constant * ir)329 find_lowerable_rvalues_visitor::visit(ir_constant *ir)
330 {
331    stack_enter(ir, this);
332 
333    if (!can_lower_type(options, ir->type))
334       stack.back().state = CANT_LOWER;
335 
336    stack_leave(ir, this);
337 
338    return visit_continue;
339 }
340 
341 ir_visitor_status
visit(ir_dereference_variable * ir)342 find_lowerable_rvalues_visitor::visit(ir_dereference_variable *ir)
343 {
344    stack_enter(ir, this);
345 
346    if (stack.back().state == UNKNOWN)
347       stack.back().state = handle_precision(ir->type, ir->precision());
348 
349    stack_leave(ir, this);
350 
351    return visit_continue;
352 }
353 
354 ir_visitor_status
visit_enter(ir_dereference_record * ir)355 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_record *ir)
356 {
357    ir_hierarchical_visitor::visit_enter(ir);
358 
359    if (stack.back().state == UNKNOWN)
360       stack.back().state = handle_precision(ir->type, ir->precision());
361 
362    return visit_continue;
363 }
364 
365 ir_visitor_status
visit_enter(ir_dereference_array * ir)366 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_array *ir)
367 {
368    ir_hierarchical_visitor::visit_enter(ir);
369 
370    if (stack.back().state == UNKNOWN)
371       stack.back().state = handle_precision(ir->type, ir->precision());
372 
373    return visit_continue;
374 }
375 
376 ir_visitor_status
visit_enter(ir_texture * ir)377 find_lowerable_rvalues_visitor::visit_enter(ir_texture *ir)
378 {
379    ir_hierarchical_visitor::visit_enter(ir);
380 
381    /* The precision of the sample value depends on the precision of the
382     * sampler.
383     */
384    stack.back().state = handle_precision(ir->type,
385                                          ir->sampler->precision());
386    return visit_continue;
387 }
388 
389 ir_visitor_status
visit_enter(ir_expression * ir)390 find_lowerable_rvalues_visitor::visit_enter(ir_expression *ir)
391 {
392    ir_hierarchical_visitor::visit_enter(ir);
393 
394    if (!can_lower_type(options, ir->type))
395       stack.back().state = CANT_LOWER;
396 
397    /* Don't lower precision for derivative calculations */
398    if (!options->LowerPrecisionDerivatives &&
399        (ir->operation == ir_unop_dFdx ||
400         ir->operation == ir_unop_dFdx_coarse ||
401         ir->operation == ir_unop_dFdx_fine ||
402         ir->operation == ir_unop_dFdy ||
403         ir->operation == ir_unop_dFdy_coarse ||
404         ir->operation == ir_unop_dFdy_fine)) {
405       stack.back().state = CANT_LOWER;
406    }
407 
408    return visit_continue;
409 }
410 
/* Builtins whose return value is defined to be at most mediump regardless
 * of argument precision.
 */
static bool
function_always_returns_mediump_or_lowp(const char *name)
{
   static const char *const mediump_builtins[] = {
      "bitCount",
      "findLSB",
      "findMSB",
      "unpackHalf2x16",
      "unpackUnorm4x8",
      "unpackSnorm4x8",
   };

   for (const char *candidate : mediump_builtins) {
      if (strcmp(name, candidate) == 0)
         return true;
   }

   return false;
}
421 
422 static unsigned
handle_call(ir_call * ir,const struct set * lowerable_rvalues)423 handle_call(ir_call *ir, const struct set *lowerable_rvalues)
424 {
425    /* The intrinsic call is inside the wrapper imageLoad function that will
426     * be inlined. We have to handle both of them.
427     */
428    if (ir->callee->intrinsic_id == ir_intrinsic_image_load ||
429        (ir->callee->is_builtin() &&
430         !strcmp(ir->callee_name(), "imageLoad"))) {
431       ir_rvalue *param = (ir_rvalue*)ir->actual_parameters.get_head();
432       ir_variable *resource = param->variable_referenced();
433 
434       assert(ir->callee->return_precision == GLSL_PRECISION_NONE);
435       assert(resource->type->without_array()->is_image());
436 
437       /* GLSL ES 3.20 requires that images have a precision modifier, but if
438        * you set one, it doesn't do anything, because all intrinsics are
439        * defined with highp. This seems to be a spec bug.
440        *
441        * In theory we could set the return value to mediump if the image
442        * format has a lower precision. This appears to be the most sensible
443        * thing to do.
444        */
445       const struct util_format_description *desc =
446          util_format_description(resource->data.image_format);
447       int i =
448          util_format_get_first_non_void_channel(resource->data.image_format);
449       bool mediump;
450 
451       assert(i >= 0);
452 
453       if (desc->channel[i].pure_integer ||
454           desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
455          mediump = desc->channel[i].size <= 16;
456       else
457          mediump = desc->channel[i].size <= 10; /* unorm/snorm */
458 
459       return mediump ? GLSL_PRECISION_MEDIUM : GLSL_PRECISION_HIGH;
460    }
461 
462    /* Return the declared precision for user-defined functions. */
463    if (!ir->callee->is_builtin())
464       return ir->callee->return_precision;
465 
466    /* Handle special calls. */
467    if (ir->callee->is_builtin() && ir->actual_parameters.length()) {
468       ir_rvalue *param = (ir_rvalue*)ir->actual_parameters.get_head();
469       ir_variable *var = param->variable_referenced();
470 
471       /* Handle builtin wrappers around ir_texture opcodes. These wrappers will
472        * be inlined by lower_precision() if we return true here, so that we can
473        * get to ir_texture later and do proper lowering.
474        *
475        * We should lower the type of the return value if the sampler type
476        * uses lower precision. The function parameters don't matter.
477        */
478       if (var && var->type->without_array()->is_sampler()) {
479          /* textureSize always returns highp. */
480          if (!strcmp(ir->callee_name(), "textureSize"))
481             return GLSL_PRECISION_HIGH;
482 
483          return var->data.precision;
484       }
485    }
486 
487    if (/* Parameters are always highp: */
488        !strcmp(ir->callee_name(), "floatBitsToInt") ||
489        !strcmp(ir->callee_name(), "floatBitsToUint") ||
490        !strcmp(ir->callee_name(), "intBitsToFloat") ||
491        !strcmp(ir->callee_name(), "uintBitsToFloat") ||
492        !strcmp(ir->callee_name(), "bitfieldReverse") ||
493        !strcmp(ir->callee_name(), "frexp") ||
494        !strcmp(ir->callee_name(), "ldexp") ||
495        /* Parameters and outputs are always highp: */
496        /* TODO: The operations are highp, but carry and borrow outputs are lowp. */
497        !strcmp(ir->callee_name(), "uaddCarry") ||
498        !strcmp(ir->callee_name(), "usubBorrow") ||
499        !strcmp(ir->callee_name(), "imulExtended") ||
500        !strcmp(ir->callee_name(), "umulExtended") ||
501        !strcmp(ir->callee_name(), "unpackUnorm2x16") ||
502        !strcmp(ir->callee_name(), "unpackSnorm2x16") ||
503        /* Outputs are highp: */
504        !strcmp(ir->callee_name(), "packUnorm2x16") ||
505        !strcmp(ir->callee_name(), "packSnorm2x16") ||
506        /* Parameters are mediump and outputs are highp. The parameters should
507         * be optimized in NIR, not here, e.g:
508         * - packHalf2x16 can just be a bitcast from f16vec2 to uint32
509         * - Other opcodes don't have to convert parameters to highp if the hw
510         *   has f16 versions. Optimize in NIR accordingly.
511         */
512        !strcmp(ir->callee_name(), "packHalf2x16") ||
513        !strcmp(ir->callee_name(), "packUnorm4x8") ||
514        !strcmp(ir->callee_name(), "packSnorm4x8") ||
515        /* Atomic functions are not lowered. */
516        strstr(ir->callee_name(), "atomic") == ir->callee_name())
517       return GLSL_PRECISION_HIGH;
518 
519    assert(ir->callee->return_precision == GLSL_PRECISION_NONE);
520 
521    /* Number of parameters to check if they are lowerable. */
522    unsigned check_parameters = ir->actual_parameters.length();
523 
524    /* Interpolation functions only consider the precision of the interpolant. */
525    /* Bitfield functions ignore the precision of "offset" and "bits". */
526    if (!strcmp(ir->callee_name(), "interpolateAtOffset") ||
527        !strcmp(ir->callee_name(), "interpolateAtSample") ||
528        !strcmp(ir->callee_name(), "bitfieldExtract")) {
529       check_parameters = 1;
530    } else if (!strcmp(ir->callee_name(), "bitfieldInsert")) {
531       check_parameters = 2;
532    } if (function_always_returns_mediump_or_lowp(ir->callee_name())) {
533       /* These only lower the return value. Parameters keep their precision,
534        * which is preserved in map_builtin.
535        */
536       check_parameters = 0;
537    }
538 
539    /* If the call is to a builtin, then the function won’t have a return
540     * precision and we should determine it from the precision of the arguments.
541     */
542    foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
543       if (!check_parameters)
544          break;
545 
546       if (!param->as_constant() &&
547           _mesa_set_search(lowerable_rvalues, param) == NULL)
548          return GLSL_PRECISION_HIGH;
549 
550       --check_parameters;
551    }
552 
553    return GLSL_PRECISION_MEDIUM;
554 }
555 
556 ir_visitor_status
visit_leave(ir_call * ir)557 find_lowerable_rvalues_visitor::visit_leave(ir_call *ir)
558 {
559    ir_hierarchical_visitor::visit_leave(ir);
560 
561    /* Special case for handling temporary variables generated by the compiler
562     * for function calls. If we assign to one of these using a function call
563     * that has a lowerable return type then we can assume the temporary
564     * variable should have a medium precision too.
565     */
566 
567    /* Do nothing if the return type is void. */
568    if (!ir->return_deref)
569       return visit_continue;
570 
571    ir_variable *var = ir->return_deref->variable_referenced();
572 
573    assert(var->data.mode == ir_var_temporary);
574 
575    unsigned return_precision = handle_call(ir, lowerable_rvalues);
576 
577    can_lower_state lower_state =
578       handle_precision(var->type, return_precision);
579 
580    if (lower_state == SHOULD_LOWER) {
581       /* There probably shouldn’t be any situations where multiple ir_call
582        * instructions write to the same temporary?
583        */
584       assert(var->data.precision == GLSL_PRECISION_NONE);
585       var->data.precision = GLSL_PRECISION_MEDIUM;
586    } else {
587       var->data.precision = GLSL_PRECISION_HIGH;
588    }
589 
590    return visit_continue;
591 }
592 
593 ir_visitor_status
visit_leave(ir_assignment * ir)594 find_lowerable_rvalues_visitor::visit_leave(ir_assignment *ir)
595 {
596    ir_hierarchical_visitor::visit_leave(ir);
597 
598    /* Special case for handling temporary variables generated by the compiler.
599     * If we assign to one of these using a lowered precision then we can assume
600     * the temporary variable should have a medium precision too.
601     */
602    ir_variable *var = ir->lhs->variable_referenced();
603 
604    if (var->data.mode == ir_var_temporary) {
605       if (_mesa_set_search(lowerable_rvalues, ir->rhs)) {
606          /* Only override the precision if this is the first assignment. For
607           * temporaries such as the ones generated for the ?: operator there
608           * can be multiple assignments with different precisions. This way we
609           * get the highest precision of all of the assignments.
610           */
611          if (var->data.precision == GLSL_PRECISION_NONE)
612             var->data.precision = GLSL_PRECISION_MEDIUM;
613       } else if (!ir->rhs->as_constant()) {
614          var->data.precision = GLSL_PRECISION_HIGH;
615       }
616    }
617 
618    return visit_continue;
619 }
620 
621 void
find_lowerable_rvalues(const struct gl_shader_compiler_options * options,exec_list * instructions,struct set * result)622 find_lowerable_rvalues(const struct gl_shader_compiler_options *options,
623                        exec_list *instructions,
624                        struct set *result)
625 {
626    find_lowerable_rvalues_visitor v(result, options);
627 
628    visit_list_elements(&v, instructions);
629 
630    assert(v.stack.empty());
631 }
632 
633 static const glsl_type *
convert_type(bool up,const glsl_type * type)634 convert_type(bool up, const glsl_type *type)
635 {
636    if (type->is_array()) {
637       return glsl_type::get_array_instance(convert_type(up, type->fields.array),
638                                            type->array_size(),
639                                            type->explicit_stride);
640    }
641 
642    glsl_base_type new_base_type;
643 
644    if (up) {
645       switch (type->base_type) {
646       case GLSL_TYPE_FLOAT16:
647          new_base_type = GLSL_TYPE_FLOAT;
648          break;
649       case GLSL_TYPE_INT16:
650          new_base_type = GLSL_TYPE_INT;
651          break;
652       case GLSL_TYPE_UINT16:
653          new_base_type = GLSL_TYPE_UINT;
654          break;
655       default:
656          unreachable("invalid type");
657          return NULL;
658       }
659    } else {
660       switch (type->base_type) {
661       case GLSL_TYPE_FLOAT:
662          new_base_type = GLSL_TYPE_FLOAT16;
663          break;
664       case GLSL_TYPE_INT:
665          new_base_type = GLSL_TYPE_INT16;
666          break;
667       case GLSL_TYPE_UINT:
668          new_base_type = GLSL_TYPE_UINT16;
669          break;
670       default:
671          unreachable("invalid type");
672          return NULL;
673       }
674    }
675 
676    return glsl_type::get_instance(new_base_type,
677                                   type->vector_elements,
678                                   type->matrix_columns,
679                                   type->explicit_stride,
680                                   type->interface_row_major);
681 }
682 
/* Convenience wrapper: map a 32-bit (possibly array) type to the
 * corresponding 16-bit type.
 */
static const glsl_type *
lower_glsl_type(const glsl_type *type)
{
   return convert_type(false, type);
}
688 
689 static ir_rvalue *
convert_precision(bool up,ir_rvalue * ir)690 convert_precision(bool up, ir_rvalue *ir)
691 {
692    unsigned op;
693 
694    if (up) {
695       switch (ir->type->base_type) {
696       case GLSL_TYPE_FLOAT16:
697          op = ir_unop_f162f;
698          break;
699       case GLSL_TYPE_INT16:
700          op = ir_unop_i2i;
701          break;
702       case GLSL_TYPE_UINT16:
703          op = ir_unop_u2u;
704          break;
705       default:
706          unreachable("invalid type");
707          return NULL;
708       }
709    } else {
710       switch (ir->type->base_type) {
711       case GLSL_TYPE_FLOAT:
712          op = ir_unop_f2fmp;
713          break;
714       case GLSL_TYPE_INT:
715          op = ir_unop_i2imp;
716          break;
717       case GLSL_TYPE_UINT:
718          op = ir_unop_u2ump;
719          break;
720       default:
721          unreachable("invalid type");
722          return NULL;
723       }
724    }
725 
726    const glsl_type *desired_type = convert_type(up, ir->type);
727    void *mem_ctx = ralloc_parent(ir);
728    return new(mem_ctx) ir_expression(op, desired_type, ir, NULL);
729 }
730 
731 void
handle_rvalue(ir_rvalue ** rvalue)732 lower_precision_visitor::handle_rvalue(ir_rvalue **rvalue)
733 {
734    ir_rvalue *ir = *rvalue;
735 
736    if (ir == NULL)
737       return;
738 
739    if (ir->as_dereference()) {
740       if (!ir->type->is_boolean())
741          *rvalue = convert_precision(false, ir);
742    } else if (ir->type->is_32bit()) {
743       ir->type = lower_glsl_type(ir->type);
744 
745       ir_constant *const_ir = ir->as_constant();
746 
747       if (const_ir) {
748          ir_constant_data value;
749 
750          if (ir->type->base_type == GLSL_TYPE_FLOAT16) {
751             for (unsigned i = 0; i < ARRAY_SIZE(value.f16); i++)
752                value.f16[i] = _mesa_float_to_half(const_ir->value.f[i]);
753          } else if (ir->type->base_type == GLSL_TYPE_INT16) {
754             for (unsigned i = 0; i < ARRAY_SIZE(value.i16); i++)
755                value.i16[i] = const_ir->value.i[i];
756          } else if (ir->type->base_type == GLSL_TYPE_UINT16) {
757             for (unsigned i = 0; i < ARRAY_SIZE(value.u16); i++)
758                value.u16[i] = const_ir->value.u[i];
759          } else {
760             unreachable("invalid type");
761          }
762 
763          const_ir->value = value;
764       }
765    }
766 }
767 
/* Stop descent at record dereferences: we don’t want to lower the variable
 * itself, only the value read from it (handled in handle_rvalue).
 */
ir_visitor_status
lower_precision_visitor::visit_enter(ir_dereference_record *ir)
{
   /* We don’t want to lower the variable */
   return visit_continue_with_parent;
}
774 
/* Stop descent at array dereferences. */
ir_visitor_status
lower_precision_visitor::visit_enter(ir_dereference_array *ir)
{
   /* We don’t want to convert the array index or the variable. If the array
    * index itself is lowerable that will be handled separately.
    */
   return visit_continue_with_parent;
}
783 
/* Stop descent at calls. */
ir_visitor_status
lower_precision_visitor::visit_enter(ir_call *ir)
{
   /* We don’t want to convert the arguments. These will be handled separately.
    */
   return visit_continue_with_parent;
}
791 
/* Stop descent at texture instructions. */
ir_visitor_status
lower_precision_visitor::visit_enter(ir_texture *ir)
{
   /* We don’t want to convert the arguments. These will be handled separately.
    */
   return visit_continue_with_parent;
}
799 
800 ir_visitor_status
visit_leave(ir_expression * ir)801 lower_precision_visitor::visit_leave(ir_expression *ir)
802 {
803    ir_rvalue_visitor::visit_leave(ir);
804 
805    /* If the expression is a conversion operation to or from bool then fix the
806     * operation.
807     */
808    switch (ir->operation) {
809    case ir_unop_b2f:
810       ir->operation = ir_unop_b2f16;
811       break;
812    case ir_unop_f2b:
813       ir->operation = ir_unop_f162b;
814       break;
815    case ir_unop_b2i:
816    case ir_unop_i2b:
817       /* Nothing to do - they both support int16. */
818       break;
819    default:
820       break;
821    }
822 
823    return visit_continue;
824 }
825 
826 void
handle_rvalue(ir_rvalue ** rvalue)827 find_precision_visitor::handle_rvalue(ir_rvalue **rvalue)
828 {
829    /* Checking the precision of rvalue can be lowered first throughout
830     * find_lowerable_rvalues_visitor.
831     * Once it found the precision of rvalue can be lowered, then we can
832     * add conversion f2fmp, etc. through lower_precision_visitor.
833     */
834    if (*rvalue == NULL)
835       return;
836 
837    struct set_entry *entry = _mesa_set_search(lowerable_rvalues, *rvalue);
838 
839    if (!entry)
840       return;
841 
842    _mesa_set_remove(lowerable_rvalues, entry);
843 
844    /* If the entire expression is just a variable dereference then trying to
845     * lower it will just directly add pointless to and from conversions without
846     * any actual operation in-between. Although these will eventually get
847     * optimised out, avoiding generating them here also avoids breaking inout
848     * parameters to functions.
849     */
850    if ((*rvalue)->as_dereference())
851       return;
852 
853    lower_precision_visitor v;
854 
855    (*rvalue)->accept(&v);
856    v.handle_rvalue(rvalue);
857 
858    /* We don’t need to add the final conversion if the final type has been
859     * converted to bool
860     */
861    if ((*rvalue)->type->base_type != GLSL_TYPE_BOOL) {
862       *rvalue = convert_precision(true, *rvalue);
863    }
864 }
865 
866 ir_visitor_status
visit_enter(ir_call * ir)867 find_precision_visitor::visit_enter(ir_call *ir)
868 {
869    ir_rvalue_enter_visitor::visit_enter(ir);
870 
871    ir_variable *return_var =
872       ir->return_deref ? ir->return_deref->variable_referenced() : NULL;
873 
874    /* Don't do anything for image_load here. We have only changed the return
875     * value to mediump/lowp, so that following instructions can use reduced
876     * precision.
877     *
878     * The return value type of the intrinsic itself isn't changed here, but
879     * can be changed in NIR if all users use the *2*mp opcode.
880     */
881    if (ir->callee->intrinsic_id == ir_intrinsic_image_load)
882       return visit_continue;
883 
884    /* If this is a call to a builtin and the find_lowerable_rvalues_visitor
885     * overrode the precision of the temporary return variable, then we can
886     * replace the builtin implementation with a lowered version.
887     */
888 
889    if (!ir->callee->is_builtin() ||
890        ir->callee->is_intrinsic() ||
891        return_var == NULL ||
892        (return_var->data.precision != GLSL_PRECISION_MEDIUM &&
893         return_var->data.precision != GLSL_PRECISION_LOW))
894       return visit_continue;
895 
896    ir->callee = map_builtin(ir->callee);
897    ir->generate_inline(ir);
898    ir->remove();
899 
900    return visit_continue_with_parent;
901 }
902 
903 ir_function_signature *
map_builtin(ir_function_signature * sig)904 find_precision_visitor::map_builtin(ir_function_signature *sig)
905 {
906    if (lowered_builtins == NULL) {
907       lowered_builtins = _mesa_pointer_hash_table_create(NULL);
908       clone_ht =_mesa_pointer_hash_table_create(NULL);
909       lowered_builtin_mem_ctx = ralloc_context(NULL);
910    } else {
911       struct hash_entry *entry = _mesa_hash_table_search(lowered_builtins, sig);
912       if (entry)
913          return (ir_function_signature *) entry->data;
914    }
915 
916    ir_function_signature *lowered_sig =
917       sig->clone(lowered_builtin_mem_ctx, clone_ht);
918 
919    /* Functions that always return mediump or lowp should keep their
920     * parameters intact, because they can be highp. NIR can lower
921     * the up-conversion for parameters if needed.
922     */
923    if (!function_always_returns_mediump_or_lowp(sig->function_name())) {
924       foreach_in_list(ir_variable, param, &lowered_sig->parameters) {
925          param->data.precision = GLSL_PRECISION_MEDIUM;
926       }
927    }
928 
929    lower_precision(options, &lowered_sig->body);
930 
931    _mesa_hash_table_clear(clone_ht, NULL);
932 
933    _mesa_hash_table_insert(lowered_builtins, sig, lowered_sig);
934 
935    return lowered_sig;
936 }
937 
/* lowerable_rvalues is filled in later by find_lowerable_rvalues();
 * lowered_builtins, clone_ht and lowered_builtin_mem_ctx are allocated
 * lazily in map_builtin() and may stay NULL.
 */
find_precision_visitor::find_precision_visitor(const struct gl_shader_compiler_options *options)
   : lowerable_rvalues(_mesa_pointer_set_create(NULL)),
     lowered_builtins(NULL),
     clone_ht(NULL),
     lowered_builtin_mem_ctx(NULL),
     options(options)
{
}
946 
~find_precision_visitor()947 find_precision_visitor::~find_precision_visitor()
948 {
949    _mesa_set_destroy(lowerable_rvalues, NULL);
950 
951    if (lowered_builtins) {
952       _mesa_hash_table_destroy(lowered_builtins, NULL);
953       _mesa_hash_table_destroy(clone_ht, NULL);
954       ralloc_free(lowered_builtin_mem_ctx);
955    }
956 }
957 
958 /* Lowering opcodes to 16 bits is not enough for programs with control flow
959  * (and the ?: operator, which is represented by if-then-else in the IR),
960  * because temporary variables, which are used for passing values between
961  * code blocks, are not lowered, resulting in 32-bit phis in NIR.
962  *
963  * First change the variable types to 16 bits, then change all ir_dereference
964  * types to 16 bits.
965  */
966 class lower_variables_visitor : public ir_rvalue_enter_visitor {
967 public:
lower_variables_visitor(const struct gl_shader_compiler_options * options)968    lower_variables_visitor(const struct gl_shader_compiler_options *options)
969       : options(options) {
970       lower_vars = _mesa_pointer_set_create(NULL);
971    }
972 
~lower_variables_visitor()973    virtual ~lower_variables_visitor()
974    {
975       _mesa_set_destroy(lower_vars, NULL);
976    }
977 
978    virtual ir_visitor_status visit(ir_variable *var);
979    virtual ir_visitor_status visit_enter(ir_assignment *ir);
980    virtual ir_visitor_status visit_enter(ir_return *ir);
981    virtual ir_visitor_status visit_enter(ir_call *ir);
982    virtual void handle_rvalue(ir_rvalue **rvalue);
983 
984    void fix_types_in_deref_chain(ir_dereference *ir);
985    void convert_split_assignment(ir_dereference *lhs, ir_rvalue *rhs,
986                                  bool insert_before);
987 
988    const struct gl_shader_compiler_options *options;
989    set *lower_vars;
990 };
991 
992 static void
lower_constant(ir_constant * ir)993 lower_constant(ir_constant *ir)
994 {
995    if (ir->type->is_array()) {
996       for (int i = 0; i < ir->type->array_size(); i++)
997          lower_constant(ir->get_array_element(i));
998 
999       ir->type = lower_glsl_type(ir->type);
1000       return;
1001    }
1002 
1003    ir->type = lower_glsl_type(ir->type);
1004    ir_constant_data value;
1005 
1006    if (ir->type->base_type == GLSL_TYPE_FLOAT16) {
1007       for (unsigned i = 0; i < ARRAY_SIZE(value.f16); i++)
1008          value.f16[i] = _mesa_float_to_half(ir->value.f[i]);
1009    } else if (ir->type->base_type == GLSL_TYPE_INT16) {
1010       for (unsigned i = 0; i < ARRAY_SIZE(value.i16); i++)
1011          value.i16[i] = ir->value.i[i];
1012    } else if (ir->type->base_type == GLSL_TYPE_UINT16) {
1013       for (unsigned i = 0; i < ARRAY_SIZE(value.u16); i++)
1014          value.u16[i] = ir->value.u[i];
1015    } else {
1016       unreachable("invalid type");
1017    }
1018 
1019    ir->value = value;
1020 }
1021 
ir_visitor_status
lower_variables_visitor::visit(ir_variable *var)
{
   /* Decide whether this variable's storage can be retyped to 16 bits.
    * Skip it unless all of the following hold:
    *  - it is a temporary, an auto, or a non-UBO uniform (uniforms only
    *    when LowerPrecisionFloat16Uniforms is set and the element type
    *    is float),
    *  - its (array element) type is currently 32-bit,
    *  - it was declared mediump or lowp,
    *  - the driver options allow lowering this type.
    */
   if ((var->data.mode != ir_var_temporary &&
        var->data.mode != ir_var_auto &&
        /* Lower uniforms but not UBOs. */
        (var->data.mode != ir_var_uniform ||
         var->is_in_buffer_block() ||
         !(options->LowerPrecisionFloat16Uniforms &&
           var->type->without_array()->base_type == GLSL_TYPE_FLOAT))) ||
       !var->type->without_array()->is_32bit() ||
       (var->data.precision != GLSL_PRECISION_MEDIUM &&
        var->data.precision != GLSL_PRECISION_LOW) ||
       !can_lower_type(options, var->type))
      return visit_continue;

   /* Lower constant initializers. */
   if (var->constant_value &&
       var->type == var->constant_value->type) {
      if (!options->LowerPrecisionConstants)
         return visit_continue;
      /* Replace with a clone allocated from the variable's context, then
       * lower the clone in place.
       */
      var->constant_value =
         var->constant_value->clone(ralloc_parent(var), NULL);
      lower_constant(var->constant_value);
   }

   if (var->constant_initializer &&
       var->type == var->constant_initializer->type) {
      if (!options->LowerPrecisionConstants)
         return visit_continue;
      var->constant_initializer =
         var->constant_initializer->clone(ralloc_parent(var), NULL);
      lower_constant(var->constant_initializer);
   }

   /* Retype the variable and remember it so every dereference of it can be
    * fixed up by the other visitor methods.
    */
   var->type = lower_glsl_type(var->type);
   _mesa_set_add(lower_vars, var);

   return visit_continue;
}
1062 
1063 void
fix_types_in_deref_chain(ir_dereference * ir)1064 lower_variables_visitor::fix_types_in_deref_chain(ir_dereference *ir)
1065 {
1066    assert(ir->type->without_array()->is_32bit());
1067    assert(_mesa_set_search(lower_vars, ir->variable_referenced()));
1068 
1069    /* Fix the type in the dereference node. */
1070    ir->type = lower_glsl_type(ir->type);
1071 
1072    /* If it's an array, fix the types in the whole dereference chain. */
1073    for (ir_dereference_array *deref_array = ir->as_dereference_array();
1074         deref_array;
1075         deref_array = deref_array->array->as_dereference_array()) {
1076       assert(deref_array->array->type->without_array()->is_32bit());
1077       deref_array->array->type = lower_glsl_type(deref_array->array->type);
1078    }
1079 }
1080 
1081 void
convert_split_assignment(ir_dereference * lhs,ir_rvalue * rhs,bool insert_before)1082 lower_variables_visitor::convert_split_assignment(ir_dereference *lhs,
1083                                                   ir_rvalue *rhs,
1084                                                   bool insert_before)
1085 {
1086    void *mem_ctx = ralloc_parent(lhs);
1087 
1088    if (lhs->type->is_array()) {
1089       for (unsigned i = 0; i < lhs->type->length; i++) {
1090          ir_dereference *l, *r;
1091 
1092          l = new(mem_ctx) ir_dereference_array(lhs->clone(mem_ctx, NULL),
1093                                                new(mem_ctx) ir_constant(i));
1094          r = new(mem_ctx) ir_dereference_array(rhs->clone(mem_ctx, NULL),
1095                                                new(mem_ctx) ir_constant(i));
1096          convert_split_assignment(l, r, insert_before);
1097       }
1098       return;
1099    }
1100 
1101    assert(lhs->type->is_16bit() || lhs->type->is_32bit());
1102    assert(rhs->type->is_16bit() || rhs->type->is_32bit());
1103    assert(lhs->type->is_16bit() != rhs->type->is_16bit());
1104 
1105    ir_assignment *assign =
1106       new(mem_ctx) ir_assignment(lhs, convert_precision(lhs->type->is_32bit(), rhs));
1107 
1108    if (insert_before)
1109       base_ir->insert_before(assign);
1110    else
1111       base_ir->insert_after(assign);
1112 }
1113 
/* Fix up assignments that involve a variable whose type was lowered to
 * 16 bits: retype the dereferences, and where the two sides now disagree
 * in bit size, either split the assignment (arrays) or insert/remove a
 * conversion (non-arrays).
 */
ir_visitor_status
lower_variables_visitor::visit_enter(ir_assignment *ir)
{
   ir_dereference *lhs = ir->lhs;
   ir_variable *var = lhs->variable_referenced();
   ir_dereference *rhs_deref = ir->rhs->as_dereference();
   ir_variable *rhs_var = rhs_deref ? rhs_deref->variable_referenced() : NULL;
   ir_constant *rhs_const = ir->rhs->as_constant();

   /* Legalize array assignments between lowered and non-lowered variables. */
   if (lhs->type->is_array() &&
       (rhs_var || rhs_const) &&
       (!rhs_var ||
        (var &&
         var->type->without_array()->is_16bit() !=
         rhs_var->type->without_array()->is_16bit())) &&
       (!rhs_const ||
        (var &&
         var->type->without_array()->is_16bit() &&
         rhs_const->type->without_array()->is_32bit()))) {
      assert(ir->rhs->type->is_array());

      /* Fix array assignments from lowered to non-lowered. */
      if (rhs_var && _mesa_set_search(lower_vars, rhs_var)) {
         fix_types_in_deref_chain(rhs_deref);
         /* Convert to 32 bits for LHS. */
         convert_split_assignment(lhs, rhs_deref, true);
         /* The split assignments replace the original one entirely. */
         ir->remove();
         return visit_continue;
      }

      /* Fix array assignments from non-lowered to lowered. */
      if (var &&
          _mesa_set_search(lower_vars, var) &&
          ir->rhs->type->without_array()->is_32bit()) {
         fix_types_in_deref_chain(lhs);
         /* Convert to 16 bits for LHS. */
         convert_split_assignment(lhs, ir->rhs, true);
         ir->remove();
         return visit_continue;
      }
   }

   /* Fix assignment types. */
   if (var &&
       _mesa_set_search(lower_vars, var)) {
      /* Fix the LHS type. */
      if (lhs->type->without_array()->is_32bit())
         fix_types_in_deref_chain(lhs);

      /* Fix the RHS type if it's a lowered variable. */
      if (rhs_var &&
          _mesa_set_search(lower_vars, rhs_var) &&
          rhs_deref->type->without_array()->is_32bit())
         fix_types_in_deref_chain(rhs_deref);

      /* Fix the RHS type if it's a non-array expression. */
      if (ir->rhs->type->is_32bit()) {
         ir_expression *expr = ir->rhs->as_expression();

         /* Convert the RHS to the LHS type. */
         if (expr &&
             (expr->operation == ir_unop_f162f ||
              expr->operation == ir_unop_i2i ||
              expr->operation == ir_unop_u2u) &&
             expr->operands[0]->type->is_16bit()) {
            /* If there is an "up" conversion, just remove it.
             * This is optional. We could as well execute the else statement and
             * let NIR eliminate the up+down conversions.
             */
            ir->rhs = expr->operands[0];
         } else {
            /* Add a "down" conversion operation to fix the type of RHS. */
            ir->rhs = convert_precision(false, ir->rhs);
         }
      }
   }

   return ir_rvalue_enter_visitor::visit_enter(ir);
}
1194 
1195 ir_visitor_status
visit_enter(ir_return * ir)1196 lower_variables_visitor::visit_enter(ir_return *ir)
1197 {
1198    void *mem_ctx = ralloc_parent(ir);
1199 
1200    ir_dereference *deref = ir->value ? ir->value->as_dereference() : NULL;
1201    if (deref) {
1202       ir_variable *var = deref->variable_referenced();
1203 
1204       /* Fix the type of the return value. */
1205       if (var &&
1206           _mesa_set_search(lower_vars, var) &&
1207           deref->type->without_array()->is_32bit()) {
1208          /* Create a 32-bit temporary variable. */
1209          ir_variable *new_var =
1210             new(mem_ctx) ir_variable(deref->type, "lowerp", ir_var_temporary);
1211          base_ir->insert_before(new_var);
1212 
1213          /* Fix types in dereferences. */
1214          fix_types_in_deref_chain(deref);
1215 
1216          /* Convert to 32 bits for the return value. */
1217          convert_split_assignment(new(mem_ctx) ir_dereference_variable(new_var),
1218                                   deref, true);
1219          ir->value = new(mem_ctx) ir_dereference_variable(new_var);
1220       }
1221    }
1222 
1223    return ir_rvalue_enter_visitor::visit_enter(ir);
1224 }
1225 
/* Fix up reads of lowered variables: strip a now-redundant down-conversion
 * of a lowered variable, or up-convert the lowered variable into a fresh
 * 32-bit temporary so the surrounding 32-bit expression stays well typed.
 */
void lower_variables_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   ir_rvalue *ir = *rvalue;

   /* LHS dereferences are handled by visit_enter(ir_assignment). */
   if (in_assignee || ir == NULL)
      return;

   ir_expression *expr = ir->as_expression();
   ir_dereference *expr_op0_deref = expr ? expr->operands[0]->as_dereference() : NULL;

   /* Remove f2fmp(float16). Same for int16 and uint16. */
   if (expr &&
       expr_op0_deref &&
       (expr->operation == ir_unop_f2fmp ||
        expr->operation == ir_unop_i2imp ||
        expr->operation == ir_unop_u2ump ||
        expr->operation == ir_unop_f2f16 ||
        expr->operation == ir_unop_i2i ||
        expr->operation == ir_unop_u2u) &&
       expr->type->without_array()->is_16bit() &&
       expr_op0_deref->type->without_array()->is_32bit() &&
       expr_op0_deref->variable_referenced() &&
       _mesa_set_search(lower_vars, expr_op0_deref->variable_referenced())) {
      /* The operand's variable is lowered, so after retyping the deref the
       * conversion converts 16 -> 16 bits and can be dropped.
       */
      fix_types_in_deref_chain(expr_op0_deref);

      /* Remove f2fmp/i2imp/u2ump. */
      *rvalue = expr_op0_deref;
      return;
   }

   ir_dereference *deref = ir->as_dereference();

   if (deref) {
      ir_variable *var = deref->variable_referenced();

      /* var can be NULL if we are dereferencing ir_constant. */
      if (var &&
          _mesa_set_search(lower_vars, var) &&
          deref->type->without_array()->is_32bit()) {
         void *mem_ctx = ralloc_parent(ir);

         /* Create a 32-bit temporary variable. */
         ir_variable *new_var =
            new(mem_ctx) ir_variable(deref->type, "lowerp", ir_var_temporary);
         base_ir->insert_before(new_var);

         /* Fix types in dereferences. */
         fix_types_in_deref_chain(deref);

         /* Convert to 32 bits for the rvalue. */
         convert_split_assignment(new(mem_ctx) ir_dereference_variable(new_var),
                                  deref, true);
         *rvalue = new(mem_ctx) ir_dereference_variable(new_var);
      }
   }
}
1282 
/* Legalize calls whose actual parameters or return deref reference lowered
 * (16-bit) variables while the callee expects 32 bits: pass through fresh
 * 32-bit temporaries and insert converting assignments around the call.
 */
ir_visitor_status
lower_variables_visitor::visit_enter(ir_call *ir)
{
   void *mem_ctx = ralloc_parent(ir);

   /* We can't pass 16-bit variables as 32-bit inout/out parameters. */
   foreach_two_lists(formal_node, &ir->callee->parameters,
                     actual_node, &ir->actual_parameters) {
      ir_dereference *param_deref =
         ((ir_rvalue *)actual_node)->as_dereference();
      ir_variable *param = (ir_variable *)formal_node;

      if (!param_deref)
            continue;

      ir_variable *var = param_deref->variable_referenced();

      /* var can be NULL if we are dereferencing ir_constant. */
      if (var &&
          _mesa_set_search(lower_vars, var) &&
          param->type->without_array()->is_32bit()) {
         fix_types_in_deref_chain(param_deref);

         /* Create a 32-bit temporary variable for the parameter. */
         ir_variable *new_var =
            new(mem_ctx) ir_variable(param->type, "lowerp", ir_var_temporary);
         base_ir->insert_before(new_var);

         /* Replace the parameter. */
         actual_node->replace_with(new(mem_ctx) ir_dereference_variable(new_var));

         if (param->data.mode == ir_var_function_in ||
             param->data.mode == ir_var_function_inout) {
            /* Convert to 32 bits for passing in. */
            convert_split_assignment(new(mem_ctx) ir_dereference_variable(new_var),
                                     param_deref->clone(mem_ctx, NULL), true);
         }
         if (param->data.mode == ir_var_function_out ||
             param->data.mode == ir_var_function_inout) {
            /* Convert to 16 bits after returning. */
            convert_split_assignment(param_deref,
                                     new(mem_ctx) ir_dereference_variable(new_var),
                                     false);
         }
      }
   }

   /* Fix the type of return value dereferencies. */
   ir_dereference_variable *ret_deref = ir->return_deref;
   ir_variable *ret_var = ret_deref ? ret_deref->variable_referenced() : NULL;

   if (ret_var &&
       _mesa_set_search(lower_vars, ret_var) &&
       ret_deref->type->without_array()->is_32bit()) {
      /* Create a 32-bit temporary variable. */
      ir_variable *new_var =
         new(mem_ctx) ir_variable(ir->callee->return_type, "lowerp",
                                  ir_var_temporary);
      base_ir->insert_before(new_var);

      /* Replace the return variable. */
      ret_deref->var = new_var;

      /* Convert to 16 bits after returning. */
      convert_split_assignment(new(mem_ctx) ir_dereference_variable(ret_var),
                               new(mem_ctx) ir_dereference_variable(new_var),
                               false);
   }

   return ir_rvalue_enter_visitor::visit_enter(ir);
}
1354 
1355 }
1356 
1357 void
lower_precision(const struct gl_shader_compiler_options * options,exec_list * instructions)1358 lower_precision(const struct gl_shader_compiler_options *options,
1359                 exec_list *instructions)
1360 {
1361    find_precision_visitor v(options);
1362    find_lowerable_rvalues(options, instructions, v.lowerable_rvalues);
1363    visit_list_elements(&v, instructions);
1364 
1365    lower_variables_visitor vars(options);
1366    visit_list_elements(&vars, instructions);
1367 }
1368