/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H

#include "nir_control_flow.h"
#include "util/bitscan.h"
#include "util/half_float.h"

#ifdef __cplusplus
extern "C" {
#endif

struct exec_list;

typedef struct nir_builder {
   nir_cursor cursor;

   /* Whether new ALU instructions will be marked "exact" */
   bool exact;

   /* Whether to run divergence analysis on inserted instructions (loop merge
    * and header phis are not updated). */
   bool update_divergence;

   nir_shader *shader;
   nir_function_impl *impl;
} nir_builder;

void nir_builder_init(nir_builder *build, nir_function_impl *impl);

nir_builder MUST_CHECK PRINTFLIKE(3, 4)
nir_builder_init_simple_shader(gl_shader_stage stage,
                               const nir_shader_compiler_options *options,
                               const char *name, ...);

typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);

/**
 * Iterates over all the instructions in a NIR shader and calls the given pass
 * on them.
 *
 * The pass should return true if it modified the shader.  In that case, only
 * the preserved metadata flags will be preserved in the function impl.
 *
 * The builder will be initialized to point at the function impl, but its
 * cursor is unset.
 */
static inline bool
nir_shader_instructions_pass(nir_shader *shader,
                             nir_instr_pass_cb pass,
                             nir_metadata preserved,
                             void *cb_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      bool func_progress = false;
      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block_safe(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            func_progress |= pass(&b, instr, cb_data);
         }
      }

      if (func_progress) {
         nir_metadata_preserve(function->impl, preserved);
         progress = true;
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}
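
/* Usage sketch (illustrative, not part of this header): a callback for
 * nir_shader_instructions_pass() that rewrites every fneg into a multiply
 * by -1.0.  The pass and callback names are hypothetical; the helpers used
 * are the ones declared in this file plus core NIR instruction utilities.
 *
 *    static bool
 *    lower_fneg_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return false;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       if (alu->op != nir_op_fneg)
 *          return false;
 *
 *       b->cursor = nir_before_instr(instr);
 *       nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
 *       nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
 *                                nir_fmul_imm(b, src, -1.0));
 *       nir_instr_remove(instr);
 *       return true;
 *    }
 *
 *    bool
 *    my_lower_fneg(nir_shader *shader)
 *    {
 *       return nir_shader_instructions_pass(shader, lower_fneg_instr,
 *                                           nir_metadata_block_index |
 *                                           nir_metadata_dominance, NULL);
 *    }
 */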

void nir_builder_instr_insert(nir_builder *build, nir_instr *instr);

static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
{
   assert(build->cursor.option == nir_cursor_after_instr);
   return build->cursor.instr;
}

/* General nir_build_alu() taking a variable arg count with NULLs for the rest. */
nir_ssa_def *
nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3);

/* Fixed-arg-count variants to reduce size of codegen. */
nir_ssa_def *
nir_build_alu1(nir_builder *build, nir_op op, nir_ssa_def *src0);
nir_ssa_def *
nir_build_alu2(nir_builder *build, nir_op op, nir_ssa_def *src0,
               nir_ssa_def *src1);
nir_ssa_def *
nir_build_alu3(nir_builder *build, nir_op op, nir_ssa_def *src0,
               nir_ssa_def *src1, nir_ssa_def *src2);
nir_ssa_def *
nir_build_alu4(nir_builder *build, nir_op op, nir_ssa_def *src0,
               nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3);

nir_ssa_def *nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs);

nir_instr *nir_builder_last_instr(nir_builder *build);

void nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf);

bool nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node);

nir_if *
nir_push_if_src(nir_builder *build, nir_src condition);

nir_if *
nir_push_if(nir_builder *build, nir_ssa_def *condition);

nir_if *
nir_push_else(nir_builder *build, nir_if *nif);

void nir_pop_if(nir_builder *build, nir_if *nif);

nir_ssa_def *
nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def);

nir_loop *
nir_push_loop(nir_builder *build);

void nir_pop_loop(nir_builder *build, nir_loop *loop);

static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
   if (!undef)
      return NULL;

   nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);
   if (build->update_divergence)
      nir_update_instr_divergence(build->shader, &undef->instr);

   return &undef->def;
}

static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
              unsigned bit_size, const nir_const_value *value)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);
   if (!load_const)
      return NULL;

   memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);

   /* nir_load_const_instr_create uses rzalloc so it's already zero */

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_bool(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_bool(nir_builder *build, bool x)
{
   return nir_imm_boolN_t(build, x, 1);
}

static inline nir_ssa_def *
nir_imm_true(nir_builder *build)
{
   return nir_imm_bool(build, true);
}

static inline nir_ssa_def *
nir_imm_false(nir_builder *build)
{
   return nir_imm_bool(build, false);
}

static inline nir_ssa_def *
nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_float(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_float16(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 16);
}

static inline nir_ssa_def *
nir_imm_float(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_double(nir_builder *build, double x)
{
   return nir_imm_floatN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
   nir_const_value v[2] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
   };
   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec3(nir_builder *build, float x, float y, float z)
{
   nir_const_value v[3] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
      nir_const_value_for_float(z, 32),
   };
   return nir_build_imm(build, 3, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
      nir_const_value_for_float(z, 32),
      nir_const_value_for_float(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 16),
      nir_const_value_for_float(y, 16),
      nir_const_value_for_float(z, 16),
      nir_const_value_for_float(w, 16),
   };

   return nir_build_imm(build, 4, 16, v);
}

static inline nir_ssa_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_int(nir_builder *build, int x)
{
   return nir_imm_intN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
   return nir_imm_intN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
   nir_const_value v[2] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
   };

   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_ivec3(nir_builder *build, int x, int y, int z)
{
   nir_const_value v[3] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
      nir_const_value_for_int(z, 32),
   };

   return nir_build_imm(build, 3, 32, v);
}

static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
   nir_const_value v[4] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
      nir_const_value_for_int(z, 32),
      nir_const_value_for_int(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}

nir_ssa_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr);

/* for the couple special cases with more than 4 src args: */
nir_ssa_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs);

/* Generic builder for system values. */
nir_ssa_def *
nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
                      unsigned num_components, unsigned bit_size);

#include "nir_builder_opcodes.h"
#undef nir_deref_mode_is

static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
{
   return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
}

nir_ssa_def *
nir_vec_scalars(nir_builder *build, nir_ssa_scalar *comp, unsigned num_components);

static inline nir_ssa_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   assert(!src.abs && !src.negate);
   if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
      bool any_swizzles = false;
      for (unsigned i = 0; i < num_components; i++) {
         if (src.swizzle[i] != i)
            any_swizzles = true;
      }
      if (!any_swizzles)
         return src.src.ssa;
   }

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}

/**
 * Construct a mov that reswizzles the source's components.
 */
static inline nir_ssa_def *
nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
            unsigned num_components)
{
   assert(num_components <= NIR_MAX_VEC_COMPONENTS);
   nir_alu_src alu_src = { NIR_SRC_INIT };
   alu_src.src = nir_src_for_ssa(src);

   bool is_identity_swizzle = true;
   for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
      if (swiz[i] != i)
         is_identity_swizzle = false;
      alu_src.swizzle[i] = swiz[i];
   }

   if (num_components == src->num_components && is_identity_swizzle)
      return src;

   return nir_mov_alu(build, alu_src, num_components);
}
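
/* For example (assuming `vec4_def` and `vec2_def` are locally-built values),
 * broadcasting the .x channel or swapping a vec2's components looks like:
 *
 *    static const unsigned xxxx[4] = { 0, 0, 0, 0 };
 *    nir_ssa_def *splat = nir_swizzle(b, vec4_def, xxxx, 4);
 *
 *    static const unsigned yx[2] = { 1, 0 };
 *    nir_ssa_def *swapped = nir_swizzle(b, vec2_def, yx, 2);
 */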

/* Selects the right fdot given the number of components in each source. */
static inline nir_ssa_def *
nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   assert(src0->num_components == src1->num_components);
   switch (src0->num_components) {
   case 1: return nir_fmul(build, src0, src1);
   case 2: return nir_fdot2(build, src0, src1);
   case 3: return nir_fdot3(build, src0, src1);
   case 4: return nir_fdot4(build, src0, src1);
   case 5: return nir_fdot5(build, src0, src1);
   case 8: return nir_fdot8(build, src0, src1);
   case 16: return nir_fdot16(build, src0, src1);
   default:
      unreachable("bad component size");
   }

   return NULL;
}

static inline nir_ssa_def *
nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ieq(b, src0, src1);
   case 2: return nir_ball_iequal2(b, src0, src1);
   case 3: return nir_ball_iequal3(b, src0, src1);
   case 4: return nir_ball_iequal4(b, src0, src1);
   case 5: return nir_ball_iequal5(b, src0, src1);
   case 8: return nir_ball_iequal8(b, src0, src1);
   case 16: return nir_ball_iequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_ball(nir_builder *b, nir_ssa_def *src)
{
   return nir_ball_iequal(b, src, nir_imm_true(b));
}

static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ine(b, src0, src1);
   case 2: return nir_bany_inequal2(b, src0, src1);
   case 3: return nir_bany_inequal3(b, src0, src1);
   case 4: return nir_bany_inequal4(b, src0, src1);
   case 5: return nir_bany_inequal5(b, src0, src1);
   case 8: return nir_bany_inequal8(b, src0, src1);
   case 16: return nir_bany_inequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_bany(nir_builder *b, nir_ssa_def *src)
{
   return nir_bany_inequal(b, src, nir_imm_false(b));
}

static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
   return nir_swizzle(b, def, &c, 1);
}

static inline nir_ssa_def *
nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
{
   unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      if ((mask & (1 << i)) == 0)
         continue;
      swizzle[num_channels++] = i;
   }

   return nir_swizzle(b, def, swizzle, num_channels);
}
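
/* E.g., nir_channel(b, v, 2) yields the scalar v.z, and nir_channels(b, v, 0x5)
 * yields the vec2 (v.x, v.z): set bits in the mask select components, which
 * are then packed together in ascending order.
 */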

static inline nir_ssa_def *
_nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr,
                              nir_ssa_def *idx,
                              unsigned start, unsigned end)
{
   if (start == end - 1) {
      return arr[start];
   } else {
      unsigned mid = start + (end - start) / 2;
      return nir_bcsel(b, nir_ilt(b, idx, nir_imm_intN_t(b, mid, idx->bit_size)),
                       _nir_select_from_array_helper(b, arr, idx, start, mid),
                       _nir_select_from_array_helper(b, arr, idx, mid, end));
   }
}

static inline nir_ssa_def *
nir_select_from_ssa_def_array(nir_builder *b, nir_ssa_def **arr,
                              unsigned arr_len, nir_ssa_def *idx)
{
   return _nir_select_from_array_helper(b, arr, idx, 0, arr_len);
}

static inline nir_ssa_def *
nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
{
   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_channel(b, vec, c_const);
      else
         return nir_ssa_undef(b, 1, vec->bit_size);
   } else {
      nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < vec->num_components; i++)
         comps[i] = nir_channel(b, vec, i);
      return nir_select_from_ssa_def_array(b, comps, vec->num_components, c);
   }
}

/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
                      nir_ssa_def *scalar, unsigned c)
{
   assert(scalar->num_components == 1);
   assert(c < vec->num_components);

   nir_op vec_op = nir_op_vec(vec->num_components);
   nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);

   for (unsigned i = 0; i < vec->num_components; i++) {
      if (i == c) {
         vec_instr->src[i].src = nir_src_for_ssa(scalar);
         vec_instr->src[i].swizzle[0] = 0;
      } else {
         vec_instr->src[i].src = nir_src_for_ssa(vec);
         vec_instr->src[i].swizzle[0] = i;
      }
   }

   return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
}

/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
                  nir_ssa_def *c)
{
   assert(scalar->num_components == 1);
   assert(c->num_components == 1);

   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_vector_insert_imm(b, vec, scalar, c_const);
      else
         return vec;
   } else {
      nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
      nir_ssa_def *per_comp_idx =
         nir_build_imm(b, vec->num_components,
                       c->bit_size, per_comp_idx_const);

      /* nir_builder will automatically splat out scalars to vectors so an
       * insert is as simple as "if I'm the channel, replace me with the
       * scalar."
       */
      return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
   }
}
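
/* Worked example of the bcsel trick above: for a vec4 `vec` with c == 2,
 * per_comp_idx is the constant (0, 1, 2, 3), so nir_ieq(b, c, per_comp_idx)
 * yields (false, false, true, false) and the bcsel takes the splatted scalar
 * only in component 2, keeping `vec` everywhere else.
 */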

static inline nir_ssa_def *
nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_i2i64(build, x);
   case 32: return nir_i2i32(build, x);
   case 16: return nir_i2i16(build, x);
   case 8:  return nir_i2i8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_u2u64(build, x);
   case 32: return nir_u2u32(build, x);
   case 16: return nir_u2u16(build, x);
   case 8:  return nir_u2u8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return x;
   } else {
      return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_iadd_imm_nuw(nir_builder *b, nir_ssa_def *x, uint64_t y)
{
   nir_ssa_def *d = nir_iadd_imm(b, x, y);
   if (d != x && d->parent_instr->type == nir_instr_type_alu)
      nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
   return d;
}

static inline nir_ssa_def *
nir_iadd_nuw(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
{
   nir_ssa_def *d = nir_iadd(b, x, y);
   nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
   return d;
}

static inline nir_ssa_def *
nir_ieq_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return nir_ieq(build, x, nir_imm_intN_t(build, y, x->bit_size));
}

/* Computes (y - x); for (x - y) with an immediate, use nir_iadd_imm(build, x, -y). */
static inline nir_ssa_def *
nir_isub_imm(nir_builder *build, uint64_t y, nir_ssa_def *x)
{
   return nir_isub(build, nir_imm_intN_t(build, y, x->bit_size), x);
}

static inline nir_ssa_def *
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == 1) {
      return x;
   } else if (!build->shader->options->lower_bitops &&
              util_is_power_of_two_or_zero64(y)) {
      return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
   } else if (amul) {
      return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   } else {
      return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, false);
}

static inline nir_ssa_def *
nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, true);
}
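
/* Note on the strength reduction above: for a nonzero power-of-two y (and
 * lower_bitops unset), the multiply becomes a shift, e.g. nir_imul_imm(b, x, 8)
 * emits ishl(x, 3); ffsll(y) - 1 is exactly log2(y) in that case.
 */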

static inline nir_ssa_def *
nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == BITFIELD64_MASK(x->bit_size)) {
      return x;
   } else {
      return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_ior_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return x;
   } else if (y == BITFIELD64_MASK(x->bit_size)) {
      return nir_imm_intN_t(build, y, x->bit_size);
   } else
      return nir_ior(build, x, nir_imm_intN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_ishl_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else if (y >= x->bit_size) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else {
      return nir_ishl(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ishr(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ushr(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 1) {
      return x;
   } else if (util_is_power_of_two_nonzero(y)) {
      return nir_ushr_imm(build, x, ffsll(y) - 1);
   } else {
      return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_fclamp(nir_builder *b,
           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
{
   return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
}

static inline nir_ssa_def *
nir_iclamp(nir_builder *b,
           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
{
   return nir_imin(b, nir_imax(b, x, min_val), max_val);
}

static inline nir_ssa_def *
nir_uclamp(nir_builder *b,
           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
{
   return nir_umin(b, nir_umax(b, x, min_val), max_val);
}

static inline nir_ssa_def *
nir_ffma_imm12(nir_builder *build, nir_ssa_def *src0, double src1, double src2)
{
   if (build->shader->options->avoid_ternary_with_two_constants)
      return nir_fadd_imm(build, nir_fmul_imm(build, src0, src1), src2);
   else
      return nir_ffma(build, src0, nir_imm_floatN_t(build, src1, src0->bit_size),
                             nir_imm_floatN_t(build, src2, src0->bit_size));
}

static inline nir_ssa_def *
nir_ffma_imm1(nir_builder *build, nir_ssa_def *src0, double src1, nir_ssa_def *src2)
{
   return nir_ffma(build, src0, nir_imm_floatN_t(build, src1, src0->bit_size), src2);
}

static inline nir_ssa_def *
nir_ffma_imm2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1, double src2)
{
   return nir_ffma(build, src0, src1, nir_imm_floatN_t(build, src2, src0->bit_size));
}

static inline nir_ssa_def *
nir_a_minus_bc(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1,
               nir_ssa_def *src2)
{
   return nir_ffma(build, nir_fneg(build, src1), src2, src0);
}

static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components * src->bit_size == dest_bit_size);

   switch (dest_bit_size) {
   case 64:
      switch (src->bit_size) {
      case 32: return nir_pack_64_2x32(b, src);
      case 16: return nir_pack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (src->bit_size == 16)
         return nir_pack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated pack opcode. */
   nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
      val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
      dest = nir_ior(b, dest, val);
   }
   return dest;
}

static inline nir_ssa_def *
nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components == 1);
   assert(src->bit_size > dest_bit_size);
   const unsigned dest_num_components = src->bit_size / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   switch (src->bit_size) {
   case 64:
      switch (dest_bit_size) {
      case 32: return nir_unpack_64_2x32(b, src);
      case 16: return nir_unpack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (dest_bit_size == 16)
         return nir_unpack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated unpack opcode. */
   nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < dest_num_components; i++) {
      nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
      dest_comps[i] = nir_u2u(b, val, dest_bit_size);
   }
   return nir_vec(b, dest_comps, dest_num_components);
}
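
/* Bit-layout sketch for the shift/or fallbacks above, assuming a 32-bit vec2
 * source s = (lo, hi) packed to 64 bits:
 *
 *    dest = (u2u64(hi) << 32) | u2u64(lo)
 *
 * so component i occupies bits [i * src->bit_size, (i + 1) * src->bit_size).
 * nir_unpack_bits is the exact inverse: component i of the result is
 * u2uN(src >> (i * dest_bit_size)).
 */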

/**
 * Treats the sources as if they were one big blob of bits and extracts the
 * range of bits given by
 *
 *       [first_bit, first_bit + dest_num_components * dest_bit_size)
 *
 * The range can have any alignment or size as long as it's an integer number
 * of destination components and fits inside the concatenated sources.
 *
 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
 * values are involved because that would require pack/unpack to/from a vec8
 * which NIR currently does not support.
 */
static inline nir_ssa_def *
nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
                 unsigned first_bit,
                 unsigned dest_num_components, unsigned dest_bit_size)
{
   const unsigned num_bits = dest_num_components * dest_bit_size;

   /* Figure out the common bit size */
   unsigned common_bit_size = dest_bit_size;
   for (unsigned i = 0; i < num_srcs; i++)
      common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
   if (first_bit > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));

   /* We don't want to have to deal with 1-bit values */
   assert(common_bit_size >= 8);

   nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
   assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));

   /* First, unpack to the common bit size and select the components from the
    * source.
    */
   int src_idx = -1;
   unsigned src_start_bit = 0;
   unsigned src_end_bit = 0;
   for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
      const unsigned bit = first_bit + (i * common_bit_size);
      while (bit >= src_end_bit) {
         src_idx++;
         assert(src_idx < (int) num_srcs);
         src_start_bit = src_end_bit;
         src_end_bit += srcs[src_idx]->bit_size *
                        srcs[src_idx]->num_components;
      }
      assert(bit >= src_start_bit);
      assert(bit + common_bit_size <= src_end_bit);
      const unsigned rel_bit = bit - src_start_bit;
      const unsigned src_bit_size = srcs[src_idx]->bit_size;

      nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
                                      rel_bit / src_bit_size);
      if (srcs[src_idx]->bit_size > common_bit_size) {
         nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
         comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
                                         common_bit_size);
      }
      common_comps[i] = comp;
   }

   /* Now, re-pack the destination if we have to */
   if (dest_bit_size > common_bit_size) {
      unsigned common_per_dest = dest_bit_size / common_bit_size;
      nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < dest_num_components; i++) {
         nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
                                         common_per_dest);
         dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
      }
      return nir_vec(b, dest_comps, dest_num_components);
   } else {
      assert(dest_bit_size == common_bit_size);
      return nir_vec(b, common_comps, dest_num_components);
   }
}
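
/* Illustrative call (s0 and s1 are assumed 32-bit vec4 defs): reading bits
 * [32, 96) of the concatenated blob as a 16-bit vec4:
 *
 *    nir_ssa_def *srcs[] = { s0, s1 };
 *    nir_ssa_def *v = nir_extract_bits(b, srcs, 2, 32, 4, 16);
 *
 * Here the common bit size works out to 16 and the range lies entirely inside
 * s0, so s0's channels are unpacked to 16 bits and four of them are selected.
 */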

static inline nir_ssa_def *
nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert((src->bit_size * src->num_components) % dest_bit_size == 0);
   const unsigned dest_num_components =
      (src->bit_size * src->num_components) / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
}

/**
 * Pad a value to N components with undefs of matching bit size.
 * If the value already contains >= num_components, it is returned without change.
 */
static inline nir_ssa_def *
nir_pad_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
{
   assert(src->num_components <= num_components);
   if (src->num_components == num_components)
      return src;

   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
   nir_ssa_scalar undef = nir_get_ssa_scalar(nir_ssa_undef(b, 1, src->bit_size), 0);
   unsigned i = 0;
   for (; i < src->num_components; i++)
      components[i] = nir_get_ssa_scalar(src, i);
   for (; i < num_components; i++)
      components[i] = undef;

   return nir_vec_scalars(b, components, num_components);
}

/**
 * Pad a value to N components with copies of the given immediate of matching
 * bit size. If the value already contains >= num_components, it is returned
 * without change.
 */
static inline nir_ssa_def *
nir_pad_vector_imm_int(nir_builder *b, nir_ssa_def *src, uint64_t imm_val,
                       unsigned num_components)
{
   assert(src->num_components <= num_components);
   if (src->num_components == num_components)
      return src;

   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
   nir_ssa_scalar imm = nir_get_ssa_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
   unsigned i = 0;
   for (; i < src->num_components; i++)
      components[i] = nir_get_ssa_scalar(src, i);
   for (; i < num_components; i++)
      components[i] = imm;

   return nir_vec_scalars(b, components, num_components);
}

/**
 * Pad a value to 4 components with undefs of matching bit size.
 * If the value already contains >= 4 components, it is returned without change.
 */
static inline nir_ssa_def *
nir_pad_vec4(nir_builder *b, nir_ssa_def *src)
{
   return nir_pad_vector(b, src, 4);
}

nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components);

nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn);

static inline unsigned
nir_get_ptr_bitsize(nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_KERNEL)
      return shader->info.cs.ptr_size;
   return 32;
}

static inline nir_deref_instr *
nir_build_deref_var(nir_builder *build, nir_variable *var)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_var);

   deref->modes = (nir_variable_mode)var->data.mode;
   deref->type = var->type;
   deref->var = var;

   nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
                     nir_get_ptr_bitsize(build->shader), NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
                      nir_ssa_def *index)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type) ||
          glsl_type_is_vector(parent->type));

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array);

   deref->modes = parent->modes;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                          int64_t index)
{
   assert(parent->dest.is_ssa);
   nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
                                         parent->dest.ssa.bit_size);

   return nir_build_deref_array(build, parent, idx_ssa);
}

static inline nir_deref_instr *
nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
                             nir_ssa_def *index)
{
   assert(parent->deref_type == nir_deref_type_array ||
          parent->deref_type == nir_deref_type_ptr_as_array ||
          parent->deref_type == nir_deref_type_cast);

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);

   deref->modes = parent->modes;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);

   deref->modes = parent->modes;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
                       unsigned index)
{
   assert(glsl_type_is_struct_or_ifc(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_struct);

   deref->modes = parent->modes;
   deref->type = glsl_get_struct_field(parent->type, index);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->strct.index = index;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
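
/* Usage sketch chaining the deref builders (the variable and its type are
 * assumptions for illustration): for a variable of type struct { vec4 v; } s[4],
 * a load of s[i].v can be built as
 *
 *    nir_deref_instr *d = nir_build_deref_var(b, s_var);
 *    d = nir_build_deref_array(b, d, i);   // i's bit size must match d's
 *    d = nir_build_deref_struct(b, d, 0);  // field index 0 is "v"
 *    nir_ssa_def *val = nir_load_deref(b, d);
 */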

static inline nir_deref_instr *
nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
                     nir_variable_mode modes, const struct glsl_type *type,
                     unsigned ptr_stride)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->modes = modes;
   deref->type = type;
   deref->parent = nir_src_for_ssa(parent);
   deref->cast.ptr_stride = ptr_stride;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->num_components, parent->bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,
                         uint32_t align_mul, uint32_t align_offset)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->modes = parent->modes;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->cast.ptr_stride = nir_deref_instr_array_stride(deref);
   deref->cast.align_mul = align_mul;
   deref->cast.align_offset = align_offset;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

/** Returns a deref that follows another but starting from the given parent
 *
 * The new deref will be the same type and take the same array or struct index
 * as the leader deref but it may have a different parent.  This is very
 * useful for walking deref paths.
 */
static inline nir_deref_instr *
nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                         nir_deref_instr *leader)
{
   /* If the derefs would have the same parent, don't make a new one */
   assert(leader->parent.is_ssa);
   if (leader->parent.ssa == &parent->dest.ssa)
      return leader;

   UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);

   switch (leader->deref_type) {
   case nir_deref_type_var:
      unreachable("A var dereference cannot have a parent");
      break;

   case nir_deref_type_array:
   case nir_deref_type_array_wildcard:
      assert(glsl_type_is_matrix(parent->type) ||
             glsl_type_is_array(parent->type) ||
             (leader->deref_type == nir_deref_type_array &&
              glsl_type_is_vector(parent->type)));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      if (leader->deref_type == nir_deref_type_array) {
         assert(leader->arr.index.is_ssa);
         nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
                                         parent->dest.ssa.bit_size);
         return nir_build_deref_array(b, parent, index);
      } else {
         return nir_build_deref_array_wildcard(b, parent);
      }

   case nir_deref_type_struct:
      assert(glsl_type_is_struct_or_ifc(parent->type));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      return nir_build_deref_struct(b, parent, leader->strct.index);

   default:
      unreachable("Invalid deref instruction type");
   }
}

static inline nir_ssa_def *
nir_load_reg(nir_builder *build, nir_register *reg)
{
   return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
}

static inline void
nir_store_reg(nir_builder *build, nir_register *reg,
              nir_ssa_def *def, nir_component_mask_t write_mask)
{
   assert(reg->num_components == def->num_components);
   assert(reg->bit_size == def->bit_size);

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
   nir_builder_instr_insert(build, &mov->instr);
}

static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                           enum gl_access_qualifier access)
{
   return nir_build_load_deref(build, glsl_get_vector_elements(deref->type),
                               glsl_get_bit_size(deref->type), &deref->dest.ssa,
                               access);
}

#undef nir_load_deref
static inline nir_ssa_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
   return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
}

static inline void
nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                            nir_ssa_def *value, unsigned writemask,
                            enum gl_access_qualifier access)
{
   writemask &= (1u << value->num_components) - 1u;
   nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access);
}

#undef nir_store_deref
static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
                nir_ssa_def *value, unsigned writemask)
{
   nir_store_deref_with_access(build, deref, value, writemask,
                               (enum gl_access_qualifier)0);
}

static inline void
nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
                           nir_deref_instr *src,
                           enum gl_access_qualifier dest_access,
                           enum gl_access_qualifier src_access)
{
   nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access);
}

#undef nir_copy_deref
static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
   nir_copy_deref_with_access(build, dest, src,
                              (enum gl_access_qualifier) 0,
                              (enum gl_access_qualifier) 0);
}

static inline void
nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
                             nir_deref_instr *src, nir_ssa_def *size,
                             enum gl_access_qualifier dest_access,
                             enum gl_access_qualifier src_access)
{
   nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa,
                          size, dest_access, src_access);
}

#undef nir_memcpy_deref
static inline void
nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
                 nir_deref_instr *src, nir_ssa_def *size)
{
   nir_memcpy_deref_with_access(build, dest, src, size,
                                (enum gl_access_qualifier)0,
                                (enum gl_access_qualifier)0);
}

static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
   return nir_load_deref(build, nir_build_deref_var(build, var));
}

static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
              unsigned writemask)
{
   nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
}

static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
   nir_copy_deref(build, nir_build_deref_var(build, dest),
                         nir_build_deref_var(build, src));
}
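
/* Example (out_var and new_val are hypothetical): storing only the .xy
 * channels of a vec4 variable with the writemask form above:
 *
 *    nir_store_var(b, out_var, new_val, 0x3);  // bits 0 and 1 select x and y
 */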

#undef nir_load_global
static inline nir_ssa_def *
nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
                unsigned num_components, unsigned bit_size)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global);
   load->num_components = num_components;
   load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(load, align, 0);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}
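
/* Example (illustrative sketch): loading a vec4 of 32-bit values from a
 * global address with 16-byte alignment.  "base_addr" is a hypothetical
 * 64-bit SSA address.
 *
 *    nir_ssa_def *v = nir_load_global(b, base_addr, 16, 4, 32);
 */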

#undef nir_store_global
static inline void
nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
                 nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
   store->num_components = value->num_components;
   store->src[0] = nir_src_for_ssa(value);
   store->src[1] = nir_src_for_ssa(addr);
   nir_intrinsic_set_write_mask(store,
      write_mask & BITFIELD_MASK(value->num_components));
   nir_intrinsic_set_align(store, align, 0);
   nir_builder_instr_insert(build, &store->instr);
}
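
/* Example (illustrative sketch): a 16-byte global-memory copy built from
 * the two helpers above.  "src_addr" and "dst_addr" are hypothetical
 * 64-bit SSA addresses; the 0xf write mask covers all four components.
 *
 *    nir_ssa_def *v = nir_load_global(b, src_addr, 16, 4, 32);
 *    nir_store_global(b, dst_addr, 16, v, 0xf);
 */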

#undef nir_load_global_constant
static inline nir_ssa_def *
nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
                         unsigned num_components, unsigned bit_size)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global_constant);
   load->num_components = num_components;
   load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(load, align, 0);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

#undef nir_load_param
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
   assert(param_idx < build->impl->function->num_params);
   nir_parameter *param = &build->impl->function->params[param_idx];
   return nir_build_load_param(build, param->num_components,
                               param->bit_size, param_idx);
}

/**
 * Takes an I/O intrinsic such as load/store_input and emits the sequence
 * that computes its full offset: the base and the offset source are each
 * scaled by base_stride, and the component is scaled by component_stride.
 */
static inline nir_ssa_def *
nir_build_calc_io_offset(nir_builder *b,
                         nir_intrinsic_instr *intrin,
                         nir_ssa_def *base_stride,
                         unsigned component_stride)
{
   /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
   nir_ssa_def *base_op = nir_imul_imm(b, base_stride, nir_intrinsic_base(intrin));

   /* The offset source is interpreted relative to the base, so an
    * instruction with a non-zero offset effectively reads/writes
    * another input/output.
    */
   nir_ssa_def *offset_op = nir_imul(b, base_stride,
                                     nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));

   /* component is in bytes */
   unsigned const_op = nir_intrinsic_component(intrin) * component_stride;

   return nir_iadd_imm_nuw(b, nir_iadd_nuw(b, base_op, offset_op), const_op);
}
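
/* Worked example (illustrative): with base_stride = nir_imm_int(b, 16)
 * (16 bytes per slot), component_stride = 4 (bytes per 32-bit component),
 * an intrinsic with base = 2 and component = 1, and offset source "o",
 * this emits 16*2 + 16*o + 1*4, i.e. o*16 + 36 bytes.
 */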

/* Computes `(1 << bits) - 1` in SSA without overflowing when
 * bits == dst_bit_size.
 */
static inline nir_ssa_def *
nir_mask(nir_builder *b, nir_ssa_def *bits, unsigned dst_bit_size)
{
   return nir_ushr(b, nir_imm_intN_t(b, -1, dst_bit_size),
                      nir_isub_imm(b, dst_bit_size, nir_u2u32(b, bits)));
}
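
/* Illustrative: rather than shifting 1 left by `bits` (which overflows
 * when bits == dst_bit_size), this shifts an all-ones constant right by
 * (dst_bit_size - bits).  For bits = 5, dst_bit_size = 32 the result is
 * 0xffffffff >> 27 = 0x1f, i.e. (1 << 5) - 1; for bits = 32 it yields
 * all ones, where the naive form would overflow.
 */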

static inline nir_ssa_def *
nir_f2b(nir_builder *build, nir_ssa_def *f)
{
   return nir_f2b1(build, f);
}

static inline nir_ssa_def *
nir_i2b(nir_builder *build, nir_ssa_def *i)
{
   return nir_i2b1(build, i);
}

static inline nir_ssa_def *
nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2f64(build, b);
   case 32: return nir_b2f32(build, b);
   case 16: return nir_b2f16(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}

static inline nir_ssa_def *
nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2i64(build, b);
   case 32: return nir_b2i32(build, b);
   case 16: return nir_b2i16(build, b);
   case 8:  return nir_b2i8(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}

static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                     unsigned interp_mode)
{
   unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
   nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
}
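
/* Example (illustrative sketch): requesting perspective-correct
 * per-pixel barycentrics, as used when lowering FS input interpolation.
 *
 *    nir_ssa_def *bary =
 *       nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
 *                            INTERP_MODE_SMOOTH);
 */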

static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
   assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto(nir_builder *build, struct nir_block *target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
   jump->target = target;
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
            struct nir_block *else_target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
   jump->condition = cond;
   jump->target = target;
   jump->else_target = else_target;
   nir_builder_instr_insert(build, &jump->instr);
}

nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_ssa_def *src0, nir_ssa_def *src1);

static inline void
nir_scoped_memory_barrier(nir_builder *b,
                          nir_scope scope,
                          nir_memory_semantics semantics,
                          nir_variable_mode modes)
{
   nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
}
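
/* Example (illustrative sketch): a device-scope acquire/release barrier
 * over SSBO memory, with no accompanying execution barrier (the execution
 * scope is NIR_SCOPE_NONE).
 *
 *    nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
 *                              nir_var_mem_ssbo);
 */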

nir_ssa_def *
nir_type_convert(nir_builder *b,
                 nir_ssa_def *src,
                 nir_alu_type src_type,
                 nir_alu_type dest_type);

static inline nir_ssa_def *
nir_convert_to_bit_size(nir_builder *b,
                        nir_ssa_def *src,
                        nir_alu_type type,
                        unsigned bit_size)
{
   return nir_type_convert(b, src, type, (nir_alu_type) (type | bit_size));
}

static inline nir_ssa_def *
nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
}

static inline nir_ssa_def *
nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
}

static inline nir_ssa_def *
nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
}

static inline nir_ssa_def *
nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
}

static inline nir_ssa_def *
nir_i2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_type_convert(b, src, nir_type_int,
         (nir_alu_type) (nir_type_float | bit_size));
}

static inline nir_ssa_def *
nir_u2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_type_convert(b, src, nir_type_uint,
         (nir_alu_type) (nir_type_float | bit_size));
}

static inline nir_ssa_def *
nir_f2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_type_convert(b, src, nir_type_float,
         (nir_alu_type) (nir_type_uint | bit_size));
}

static inline nir_ssa_def *
nir_f2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_type_convert(b, src, nir_type_float,
         (nir_alu_type) (nir_type_int | bit_size));
}
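
/* Example (illustrative sketch): the N-suffixed helpers pick the concrete
 * conversion opcode from a runtime bit size, which is convenient in
 * bit-size-lowering passes.  Converting a float to a hypothetical target
 * bit size "bs" (16, 32, or 64):
 *
 *    nir_ssa_def *lowered = nir_f2fN(b, src, bs);
 */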

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* NIR_BUILDER_H */