/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_vec_index_to_cond_assign.cpp
 *
 * Lowers indexing into vector types to a series of conditional moves
 * of each channel's swizzle into a temporary.
 *
 * Most GPUs don't have a native way to do this operation, and this
 * works around that.  For drivers using both this pass and
 * ir_vec_index_to_swizzle, there's a risk that this pass will happen
 * before sufficient constant folding to find that the array index is
 * constant.  However, we hope that other optimization passes,
 * particularly constant folding of assignment conditions and copy
 * propagation, will result in the same code in the end.
 */
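
/* For a non-constant index, the lowering looks roughly like this (an
 * illustrative GLSL-ish sketch, not the exact IR built below):
 *
 *    float r = v[i];        // v is a vec4, i is a dynamic index
 *
 * becomes
 *
 *    int  idx = i;
 *    vec4 tmp = v;
 *    float r;
 *    r = tmp.x   (if idx == 0)
 *    r = tmp.y   (if idx == 1)
 *    r = tmp.z   (if idx == 2)
 *    r = tmp.w   (if idx == 3)
 */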

#include "ir.h"
#include "ir_visitor.h"
#include "ir_optimization.h"
#include "compiler/glsl_types.h"
#include "ir_builder.h"

using namespace ir_builder;

namespace {

/**
 * Visitor class for lowering variable indexing of vectors to a series of
 * conditional assignments.
 */

class ir_vec_index_to_cond_assign_visitor : public ir_hierarchical_visitor {
public:
   ir_vec_index_to_cond_assign_visitor()
      : progress(false)
   {
      /* empty */
   }

   ir_rvalue *convert_vec_index_to_cond_assign(void *mem_ctx,
                                               ir_rvalue *orig_vector,
                                               ir_rvalue *orig_index,
                                               const glsl_type *type);

   ir_rvalue *convert_vector_extract_to_cond_assign(ir_rvalue *ir);

   virtual ir_visitor_status visit_enter(ir_expression *);
   virtual ir_visitor_status visit_enter(ir_swizzle *);
   virtual ir_visitor_status visit_leave(ir_assignment *);
   virtual ir_visitor_status visit_enter(ir_return *);
   virtual ir_visitor_status visit_enter(ir_call *);
   virtual ir_visitor_status visit_enter(ir_if *);

   bool progress;
};

} /* anonymous namespace */

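/**
 * Emit, in front of the current base_ir, the temporaries and per-component
 * conditional moves that implement orig_vector[orig_index], and return a
 * dereference of the temporary holding the selected component.
 */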
ir_rvalue *
ir_vec_index_to_cond_assign_visitor::convert_vec_index_to_cond_assign(void *mem_ctx,
                                                                      ir_rvalue *orig_vector,
                                                                      ir_rvalue *orig_index,
                                                                      const glsl_type *type)
{
   exec_list list;
   ir_factory body(&list, base_ir);

   /* Store the index to a temporary to avoid reusing its tree. */
   assert(orig_index->type == glsl_type::int_type ||
          orig_index->type == glsl_type::uint_type);
   ir_variable *const index =
      body.make_temp(orig_index->type, "vec_index_tmp_i");

   body.emit(assign(index, orig_index));

   /* Store the vector in a temporary to avoid duplicating its tree (in
    * particular, to avoid duplicating matrices).
    */
   ir_variable *const value =
      body.make_temp(orig_vector->type, "vec_value_tmp");

   body.emit(assign(value, orig_vector));

   /* Temporary where we store whichever value we swizzle out. */
   ir_variable *const var = body.make_temp(type, "vec_index_tmp_v");

   /* Generate a single comparison condition "mask" for all of the components
    * in the vector.
    */
   ir_variable *const cond =
      compare_index_block(body, index, 0, orig_vector->type->vector_elements);

   /* Generate a conditional move of each vector element to the temp. */
   for (unsigned i = 0; i < orig_vector->type->vector_elements; i++)
      body.emit(assign(var, swizzle(value, i, 1), swizzle(cond, i, 1)));

   /* Put all of the new instructions in the IR stream before the old
    * instruction.
    */
   base_ir->insert_before(&list);

   this->progress = true;
   return deref(var).val;
}

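/**
 * If \p ir is an ir_binop_vector_extract (or an interpolateAt* expression
 * whose interpolant is one), lower it to the conditional-assignment form;
 * otherwise return \p ir unchanged.
 */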
ir_rvalue *
ir_vec_index_to_cond_assign_visitor::convert_vector_extract_to_cond_assign(ir_rvalue *ir)
{
   ir_expression *const expr = ir->as_expression();

   if (expr == NULL)
      return ir;

   if (expr->operation == ir_unop_interpolate_at_centroid ||
       expr->operation == ir_binop_interpolate_at_offset ||
       expr->operation == ir_binop_interpolate_at_sample) {
      /* Lower interpolateAtXxx(some_vec[idx], ...) to
       * interpolateAtXxx(some_vec, ...)[idx] before lowering to conditional
       * assignments, to maintain the rule that the interpolant is an l-value
       * referring to a (part of a) shader input.
       *
       * This is required when idx is dynamic (otherwise it gets lowered to
       * a swizzle).
       */
      ir_expression *const interpolant = expr->operands[0]->as_expression();
      if (!interpolant || interpolant->operation != ir_binop_vector_extract)
         return ir;

      ir_rvalue *vec_input = interpolant->operands[0];
      ir_expression *const vec_interpolate =
         new(base_ir) ir_expression(expr->operation, vec_input->type,
                                    vec_input, expr->operands[1]);

      return convert_vec_index_to_cond_assign(ralloc_parent(ir),
                                              vec_interpolate,
                                              interpolant->operands[1],
                                              ir->type);
   }

   if (expr->operation != ir_binop_vector_extract)
      return ir;

   return convert_vec_index_to_cond_assign(ralloc_parent(ir),
                                           expr->operands[0],
                                           expr->operands[1],
                                           ir->type);
}

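/**
 * Lower vector_extracts that appear as operands of other expressions.
 */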
ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_enter(ir_expression *ir)
{
   for (unsigned i = 0; i < ir->num_operands; i++)
      ir->operands[i] = convert_vector_extract_to_cond_assign(ir->operands[i]);

   return visit_continue;
}

ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_enter(ir_swizzle *ir)
{
   /* This can't be hit from normal GLSL, since you can't swizzle a scalar
    * (which is what indexing a vector produces).  But maybe at some point
    * we'll end up using scalar swizzles for vector construction.
    */
   ir->val = convert_vector_extract_to_cond_assign(ir->val);

   return visit_continue;
}

ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_leave(ir_assignment *ir)
{
   ir->rhs = convert_vector_extract_to_cond_assign(ir->rhs);

   if (ir->condition)
      ir->condition = convert_vector_extract_to_cond_assign(ir->condition);

   return visit_continue;
}

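/**
 * Lower vector_extracts used as actual parameters of a call, replacing each
 * parameter node in the call's parameter list in place.
 */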
ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_enter(ir_call *ir)
{
   foreach_in_list_safe(ir_rvalue, param, &ir->actual_parameters) {
      ir_rvalue *new_param = convert_vector_extract_to_cond_assign(param);

      if (new_param != param) {
         param->replace_with(new_param);
      }
   }

   return visit_continue;
}

ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_enter(ir_return *ir)
{
   if (ir->value)
      ir->value = convert_vector_extract_to_cond_assign(ir->value);

   return visit_continue;
}

ir_visitor_status
ir_vec_index_to_cond_assign_visitor::visit_enter(ir_if *ir)
{
   ir->condition = convert_vector_extract_to_cond_assign(ir->condition);

   return visit_continue;
}

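/**
 * Entry point for the pass: lower all variable vector indexing
 * (ir_binop_vector_extract) in \p instructions and report whether any
 * progress was made.
 */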
bool
do_vec_index_to_cond_assign(exec_list *instructions)
{
   ir_vec_index_to_cond_assign_visitor v;

   visit_list_elements(&v, instructions);

   return v.progress;
}