/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->variables);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_function_temp:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      return;

   case nir_var_shader_temp:
   case nir_var_shader_in:
   case nir_var_shader_out:
   case nir_var_uniform:
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
   case nir_var_mem_shared:
   case nir_var_system_value:
      break;

   case nir_var_mem_global:
      assert(!"nir_shader_add_variable cannot be used for global memory");
      return;

   case nir_var_mem_push_const:
      assert(!"nir_var_push_constant is not supposed to be used for variables");
      return;

   default:
      assert(!"invalid mode");
      return;
   }

   exec_list_push_tail(&shader->variables, &var->node);
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_function_temp;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_variable *
nir_find_variable_with_location(nir_shader *shader,
                                nir_variable_mode mode,
                                unsigned location)
{
   assert(util_bitcount(mode) == 1 && mode != nir_var_function_temp);
   nir_foreach_variable_with_modes(var, shader, mode) {
      if (var->data.location == location)
         return var;
   }
   return NULL;
}

nir_variable *
nir_find_variable_with_driver_location(nir_shader *shader,
                                       nir_variable_mode mode,
                                       unsigned location)
{
   assert(util_bitcount(mode) == 1 && mode != nir_var_function_temp);
   nir_foreach_variable_with_modes(var, shader, mode) {
      if (var->data.driver_location == location)
         return var;
   }
   return NULL;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;
   func->is_entrypoint = false;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
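
/* A hedged usage sketch (not from the original source): nir_src_copy() is
 * for filling in the sources of an instruction that has not yet been
 * inserted into the IR; once the instruction is in the IR, use
 * nir_instr_rewrite_src() so the use lists stay consistent.  With a
 * hypothetical builder `b` and sources `a_src`/`b_src`:
 *
 *    nir_alu_instr *mov = nir_alu_instr_create(b.shader, nir_op_mov);
 *    nir_src_copy(&mov->src[0].src, &a_src, mov);   // not inserted yet
 *    nir_builder_instr_insert(&b, &mov->instr);     // now in the IR
 *    nir_instr_rewrite_src(&mov->instr, &mov->src[0].src, b_src);
 */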

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}

bool
nir_alu_src_is_trivial_ssa(const nir_alu_instr *alu, unsigned srcn)
{
   static uint8_t trivial_swizzle[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
   STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);

   const nir_alu_src *src = &alu->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(alu, srcn);

   return src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
          !src->abs && !src->negate &&
          (memcmp(src->swizzle, trivial_swizzle, num_components) == 0);
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}
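
/* A minimal construction sketch using the helpers above (the compiler
 * `options` pointer is a placeholder for whatever the driver provides):
 *
 *    nir_shader *s = nir_shader_create(NULL, MESA_SHADER_FRAGMENT,
 *                                      options, NULL);
 *    nir_function *fn = nir_function_create(s, "main");
 *    fn->is_entrypoint = true;
 *    nir_function_impl *impl = nir_function_impl_create(fn);
 *
 * At this point impl already has a start block wired up to its end block,
 * so instructions can be inserted immediately.
 */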

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_pointer_set_create(block);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  That way it
    * doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, the state tracker creates an
    * initial IR, clones that, runs the appropriate lowering passes, and
    * passes it to the driver, which does common lowering/opt and then
    * stores a ref which is later used to do state-specific lowering and
    * further opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_pointer_set_create(block);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   if_stmt->control = nir_selection_control_none;

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr =
      rzalloc_size(shader, sizeof(*instr) + num_components * sizeof(*instr->value));
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                  sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

static int8_t default_tg4_offsets[4][2] =
{
   { 0, 1 },
   { 1, 1 },
   { 1, 0 },
   { 0, 0 },
};

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->sampler_index = 0;
   memcpy(instr->tg4_offsets, default_tg4_offsets, sizeof(instr->tg4_offsets));

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

bool
nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex)
{
   if (tex->op != nir_texop_tg4)
      return false;
   return memcmp(tex->tg4_offsets, default_tg4_offsets,
                 sizeof(tex->tg4_offsets)) != 0;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));
   switch (bit_size) {
   case 16: v.u16 = _mesa_float_to_half(d);  break;
   case 32: v.f32 = d;                       break;
   case 64: v.f64 = d;                       break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));
   switch (bit_size) {
   case 1:  v.b   = i & 1;  break;
   case 8:  v.i8  = i;  break;
   case 16: v.i16 = i;  break;
   case 32: v.i32 = i;  break;
   case 64: v.i64 = i;  break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
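
   /* max_int and min_int above are the two's complement bounds for the
    * given bit size; e.g. for bit_size == 8, max_int == 127 and
    * min_int == -128.  They supply the identity values for the imin/imax
    * reductions below.
    */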
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by converting it to the equivalent "after" form and
 * raising it to block granularity where possible, so that equivalent
 * cursors reduce to the same representation.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
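
/* For example (a sketch): for a block `blk` whose last instruction is
 * `last`, a cursor after that instruction and a cursor after the block
 * reduce to the same (after_block, blk) form, so
 *
 *    nir_cursors_equal(nir_after_instr(last), nir_after_block(blk))
 *
 * returns true.
 */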

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

nir_ssa_def *
nir_instr_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;

   case nir_instr_type_deref:
      assert(nir_instr_as_deref(instr)->dest.is_ssa);
      return &nir_instr_as_deref(instr)->dest.ssa;

   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         assert(intrin->dest.is_ssa);
         return &intrin->dest.ssa;
      } else {
         return NULL;
      }
   }

   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;

   case nir_instr_type_parallel_copy:
      unreachable("Parallel copies are unsupported by this function");

   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;

   case nir_instr_type_ssa_undef:
      return &nir_instr_as_ssa_undef(instr)->def;

   case nir_instr_type_call:
   case nir_instr_type_jump:
      return NULL;
   }

   unreachable("Invalid instruction type");
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

bool
nir_foreach_phi_src_leaving_block(nir_block *block,
                                  nir_foreach_src_cb cb,
                                  void *state)
{
   for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
      if (block->successors[i] == NULL)
         continue;

      nir_foreach_instr(instr, block->successors[i]) {
         if (instr->type != nir_instr_type_phi)
            break;

         nir_phi_instr *phi = nir_instr_as_phi(instr);
         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->pred == block) {
               if (!cb(&phi_src->src, state))
                  return false;
            }
         }
      }
   }

   return true;
}

nir_const_value
nir_const_value_for_float(double f, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   switch (bit_size) {
   case 16:
      v.u16 = _mesa_float_to_half(f);
      break;
   case 32:
      v.f32 = f;
      break;
   case 64:
      v.f64 = f;
      break;
   default:
      unreachable("Invalid bit size");
   }

   return v;
}

double
nir_const_value_as_float(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   case 16: return _mesa_half_to_float(value.u16);
   case 32: return value.f32;
   case 64: return value.f64;
   default:
      unreachable("Invalid bit size");
   }
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return load->value;
}

/**
 * Returns true if the source is known to be dynamically uniform.  Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it cannot be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
      if (intr->intrinsic == nir_intrinsic_load_uniform &&
          nir_src_is_dynamically_uniform(intr->src[0]))
         return true;
   }

   /* Combining dynamically uniform expressions produces a dynamically
    * uniform result.
    */
   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
      nir_alu_instr *alu = nir_instr_as_alu(src.ssa->parent_instr);
      for (int i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!nir_src_is_dynamically_uniform(alu->src[i].src))
            return false;
      }

      return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}
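
/* Illustration of the rules above (a sketch, not exhaustive): a load_const
 * is dynamically uniform; an ALU op whose operands are all dynamically
 * uniform (e.g. an iadd of two load_consts) is dynamically uniform; and
 * anything the helper cannot prove uniform, such as a plain per-invocation
 * shader input, conservatively yields false.
 */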

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_is_empty(&dest->ssa.uses) && list_is_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;
   def->divergent = true; /* This is the safer default */

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                 unsigned num_components, unsigned bit_size,
                 const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source, but only
 * those uses that come after the after_me instruction.  This is useful when
 * emitting code to fix up the result of some instruction: you can freely use
 * the result in the fixup code, then call rewrite_uses_after with the last
 * fixup instruction as after_me, and it will replace all of the uses you
 * want without touching the fixup code itself.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   if (new_src.is_ssa && def == new_src.ssa)
      return;

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
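
/* A sketch of the fixup pattern described above (the builder `b`,
 * instruction `instr`, and scale factor `scale` are hypothetical):
 *
 *    nir_ssa_def *raw = nir_instr_ssa_def(instr);
 *    b.cursor = nir_after_instr(instr);
 *    nir_ssa_def *fixed = nir_fmul(&b, raw, scale);  // fixup may use raw
 *    nir_ssa_def_rewrite_uses_after(raw, nir_src_for_ssa(fixed),
 *                                   fixed->parent_instr);
 *
 * Every use of `raw` outside the fixup code now reads `fixed`, while the
 * fixup itself still reads the original value.
 */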
1583 
1584 nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def * def)1585 nir_ssa_def_components_read(const nir_ssa_def *def)
1586 {
1587    nir_component_mask_t read_mask = 0;
1588    nir_foreach_use(use, def) {
1589       if (use->parent_instr->type == nir_instr_type_alu) {
1590          nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
1591          nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
1592          int src_idx = alu_src - &alu->src[0];
1593          assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
1594          read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
1595       } else {
1596          return (1 << def->num_components) - 1;
1597       }
1598    }
1599 
1600    if (!list_is_empty(&def->if_uses))
1601       read_mask |= 1;
1602 
1603    return read_mask;
1604 }
1605 
1606 nir_block *
nir_block_cf_tree_next(nir_block * block)1607 nir_block_cf_tree_next(nir_block *block)
1608 {
1609    if (block == NULL) {
1610       /* nir_foreach_block_safe() will call this function on a NULL block
1611        * after the last iteration, but it won't use the result so just return
1612        * NULL here.
1613        */
1614       return NULL;
1615    }
1616 
1617    nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
1618    if (cf_next)
1619       return nir_cf_node_cf_tree_first(cf_next);
1620 
1621    nir_cf_node *parent = block->cf_node.parent;
1622 
1623    switch (parent->type) {
1624    case nir_cf_node_if: {
1625       /* Are we at the end of the if? Go to the beginning of the else */
1626       nir_if *if_stmt = nir_cf_node_as_if(parent);
1627       if (block == nir_if_last_then_block(if_stmt))
1628          return nir_if_first_else_block(if_stmt);
1629 
1630       assert(block == nir_if_last_else_block(if_stmt));
1631    }
1632    /* fallthrough */
1633 
1634    case nir_cf_node_loop:
1635       return nir_cf_node_as_block(nir_cf_node_next(parent));
1636 
1637    case nir_cf_node_function:
1638       return NULL;
1639 
1640    default:
1641       unreachable("unknown cf node type");
1642    }
1643 }

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
   }
   /* fallthrough */

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}
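
/* Taken together, these helpers visit every block in source order; a manual
 * loop equivalent to nir_foreach_block() is simply:
 *
 *    for (nir_block *block = nir_start_block(impl); block != NULL;
 *         block = nir_block_cf_tree_next(block)) {
 *       ...
 *    }
 */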

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}
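
/* With valid block indices a pass can key flat arrays or bitsets by
 * block->index; a sketch using util/bitset.h and a hypothetical mem_ctx:
 *
 *    nir_index_blocks(impl);
 *    BITSET_WORD *visited =
 *       rzalloc_array(mem_ctx, BITSET_WORD, BITSET_WORDS(impl->num_blocks));
 *    BITSET_SET(visited, block->index);
 */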

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}
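
/* That dominance property makes ordering queries cheap; for example, after
 * nir_index_ssa_defs(impl), a pass can test whether def a is produced before
 * def b within the same block with a simple comparison (sketch):
 *
 *    if (a->index < b->index) {
 *       ... a->parent_instr precedes b->parent_instr ...
 *    }
 */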

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

unsigned
nir_shader_index_vars(nir_shader *shader, nir_variable_mode modes)
{
   unsigned count = 0;
   nir_foreach_variable_with_modes(var, shader, modes)
      var->index = count++;
   return count;
}

unsigned
nir_function_impl_index_vars(nir_function_impl *impl)
{
   unsigned count = 0;
   nir_foreach_function_temp_variable(var, impl)
      var->index = count++;
   return count;
}

static nir_instr *
cursor_next_instr(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      for (nir_block *block = cursor.block; block;
           block = nir_block_cf_tree_next(block)) {
         nir_instr *instr = nir_block_first_instr(block);
         if (instr)
            return instr;
      }
      return NULL;

   case nir_cursor_after_block:
      cursor.block = nir_block_cf_tree_next(cursor.block);
      if (cursor.block == NULL)
         return NULL;

      cursor.option = nir_cursor_before_block;
      return cursor_next_instr(cursor);

   case nir_cursor_before_instr:
      return cursor.instr;

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr))
         return nir_instr_next(cursor.instr);

      cursor.option = nir_cursor_after_block;
      cursor.block = cursor.instr->block;
      return cursor_next_instr(cursor);
   }

   unreachable("Invalid cursor option");
}

ASSERTED static bool
dest_is_ssa(nir_dest *dest, void *_state)
{
   (void) _state;
   return dest->is_ssa;
}

bool
nir_function_impl_lower_instructions(nir_function_impl *impl,
                                     nir_instr_filter_cb filter,
                                     nir_lower_instr_cb lower,
                                     void *cb_data)
{
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata preserved = nir_metadata_block_index |
                            nir_metadata_dominance;

   bool progress = false;
   nir_cursor iter = nir_before_cf_list(&impl->body);
   nir_instr *instr;
   while ((instr = cursor_next_instr(iter)) != NULL) {
      if (filter && !filter(instr, cb_data)) {
         iter = nir_after_instr(instr);
         continue;
      }

      assert(nir_foreach_dest(instr, dest_is_ssa, NULL));
      nir_ssa_def *old_def = nir_instr_ssa_def(instr);
      if (old_def == NULL) {
         iter = nir_after_instr(instr);
         continue;
      }

      /* We're about to ask the callback to generate a replacement for instr.
       * Save off the uses from instr's SSA def so we know what uses to
       * rewrite later.  If we use nir_ssa_def_rewrite_uses, it fails in the
       * case where the generated replacement code uses the result of instr
       * itself.  If we use nir_ssa_def_rewrite_uses_after (which is the
       * normal solution to this problem), it doesn't work well if control-
       * flow is inserted as part of the replacement, doesn't handle cases
       * where the replacement is something consumed by instr, and suffers
       * from performance issues.  This is the only way to 100% guarantee
       * that we rewrite the correct set efficiently.
       */
      struct list_head old_uses, old_if_uses;
      list_replace(&old_def->uses, &old_uses);
      list_inithead(&old_def->uses);
      list_replace(&old_def->if_uses, &old_if_uses);
      list_inithead(&old_def->if_uses);

      b.cursor = nir_after_instr(instr);
      nir_ssa_def *new_def = lower(&b, instr, cb_data);
      if (new_def && new_def != NIR_LOWER_INSTR_PROGRESS) {
         assert(old_def != NULL);
         if (new_def->parent_instr->block != instr->block)
            preserved = nir_metadata_none;

         nir_src new_src = nir_src_for_ssa(new_def);
         list_for_each_entry_safe(nir_src, use_src, &old_uses, use_link)
            nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

         list_for_each_entry_safe(nir_src, use_src, &old_if_uses, use_link)
            nir_if_rewrite_condition(use_src->parent_if, new_src);

         if (list_is_empty(&old_def->uses) && list_is_empty(&old_def->if_uses)) {
            iter = nir_instr_remove(instr);
         } else {
            iter = nir_after_instr(instr);
         }
         progress = true;
      } else {
         /* We didn't end up lowering after all.  Put the uses back */
         if (old_def) {
            list_replace(&old_uses, &old_def->uses);
            list_replace(&old_if_uses, &old_def->if_uses);
         }
         iter = nir_after_instr(instr);

         if (new_def == NIR_LOWER_INSTR_PROGRESS)
            progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, preserved);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_shader_lower_instructions(nir_shader *shader,
                              nir_instr_filter_cb filter,
                              nir_lower_instr_cb lower,
                              void *cb_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_function_impl_lower_instructions(function->impl,
                                               filter, lower, cb_data))
         progress = true;
   }

   return progress;
}
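
/* Typical usage is a filter/lower callback pair; a hypothetical pass that
 * rewrites fdiv as a * (1 / b) might look like (sketch):
 *
 *    static bool
 *    filter_fdiv(const nir_instr *instr, const void *data)
 *    {
 *       return instr->type == nir_instr_type_alu &&
 *              nir_instr_as_alu((nir_instr *)instr)->op == nir_op_fdiv;
 *    }
 *
 *    static nir_ssa_def *
 *    lower_fdiv(nir_builder *b, nir_instr *instr, void *data)
 *    {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       return nir_fmul(b, nir_ssa_for_alu_src(b, alu, 0),
 *                          nir_frcp(b, nir_ssa_for_alu_src(b, alu, 1)));
 *    }
 *
 *    nir_shader_lower_instructions(shader, filter_fdiv, lower_fdiv, NULL);
 */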

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_POINT_COORD:
      return nir_intrinsic_load_point_coord;
   case SYSTEM_VALUE_LINE_COORD:
      return nir_intrinsic_load_line_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT:
      return nir_intrinsic_load_tess_level_outer_default;
   case SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT:
      return nir_intrinsic_load_tess_level_inner_default;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_COLOR0:
      return nir_intrinsic_load_color0;
   case SYSTEM_VALUE_COLOR1:
      return nir_intrinsic_load_color1;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX:
      return nir_intrinsic_load_global_invocation_index;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   case SYSTEM_VALUE_USER_DATA_AMD:
      return nir_intrinsic_load_user_data_amd;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}
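
/* A caller can use this mapping to materialize a system-value load with
 * nir_builder, e.g. (sketch, assuming a single-component 32-bit value):
 *
 *    nir_intrinsic_op op =
 *       nir_intrinsic_from_system_value(SYSTEM_VALUE_VERTEX_ID);
 *    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
 *    nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
 *    nir_builder_instr_insert(b, &load->instr);
 *
 * nir_system_value_from_intrinsic() below is its inverse for the ops that
 * both functions handle.
 */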

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_point_coord:
      return SYSTEM_VALUE_POINT_COORD;
   case nir_intrinsic_load_line_coord:
      return SYSTEM_VALUE_LINE_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_tess_level_outer_default:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT;
   case nir_intrinsic_load_tess_level_inner_default:
      return SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_color0:
      return SYSTEM_VALUE_COLOR0;
   case nir_intrinsic_load_color1:
      return SYSTEM_VALUE_COLOR1;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   case nir_intrinsic_load_user_data_amd:
      return SYSTEM_VALUE_USER_DATA_AMD;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps the location attributes if they are
 * doubles.  Not needed for Vulkan due to the differences in input location
 * counts for doubles between Vulkan and OpenGL.
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering.  The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_shader_in_variable(var, shader) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_shader_in_variable(var, shader) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
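
/* Worked example (hypothetical layout): a dvec4 at location 0 followed by
 * vec4s at locations 1 and 2 yields dual_slot == 0x1.  Location 0 keeps its
 * spot since no dual-slot bits lie below it, while locations 1 and 2 become
 * 2 and 3, opening up the second slot the dvec4 now needs.
 */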

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}
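
/* Worked example: with dual_slot == 0x1, a dual-numbered mask of 0b1011
 * (the dvec4's two slots at 0-1 plus another attribute at 3) compacts to
 * 0b101 in the original single-slot numbering.
 */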

void
nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
                            bool bindless)
{
   enum gl_access_qualifier access = nir_intrinsic_access(intrin);

   switch (intrin->intrinsic) {
#define CASE(op) \
   case nir_intrinsic_image_deref_##op: \
      intrin->intrinsic = bindless ? nir_intrinsic_bindless_image_##op \
                                   : nir_intrinsic_image_##op; \
      break;
   CASE(load)
   CASE(store)
   CASE(atomic_add)
   CASE(atomic_imin)
   CASE(atomic_umin)
   CASE(atomic_imax)
   CASE(atomic_umax)
   CASE(atomic_and)
   CASE(atomic_or)
   CASE(atomic_xor)
   CASE(atomic_exchange)
   CASE(atomic_comp_swap)
   CASE(atomic_fadd)
   CASE(atomic_inc_wrap)
   CASE(atomic_dec_wrap)
   CASE(size)
   CASE(samples)
   CASE(load_raw_intel)
   CASE(store_raw_intel)
#undef CASE
   default:
      unreachable("Unhandled image intrinsic");
   }

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
   nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
   nir_intrinsic_set_access(intrin, access | var->data.access);
   nir_intrinsic_set_format(intrin, var->data.image.format);

   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(src));
}

unsigned
nir_image_intrinsic_coord_components(const nir_intrinsic_instr *instr)
{
   enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
   int coords = glsl_get_sampler_dim_coordinate_components(dim);
   if (dim == GLSL_SAMPLER_DIM_CUBE)
      return coords;
   else
      return coords + nir_intrinsic_image_array(instr);
}
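
/* Examples: a 2D image takes 2 coordinate components, a 2D array image 3
 * (the layer index included), and a cube image always 3, since arrayed cubes
 * fold the face and layer together into the third component.
 */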